1 /*
2 * Copyright 2024 NXP
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/drivers/firmware/scmi/protocol.h>
8 #include <zephyr/drivers/firmware/scmi/transport.h>
9 #include <zephyr/logging/log.h>
10 #include <zephyr/device.h>
11 #include "mailbox.h"
12
13 LOG_MODULE_REGISTER(scmi_core);
14
15 #define SCMI_CHAN_LOCK_TIMEOUT_USEC 500
16
scmi_status_to_errno(int scmi_status)17 int scmi_status_to_errno(int scmi_status)
18 {
19 switch (scmi_status) {
20 case SCMI_SUCCESS:
21 return 0;
22 case SCMI_NOT_SUPPORTED:
23 return -EOPNOTSUPP;
24 case SCMI_INVALID_PARAMETERS:
25 return -EINVAL;
26 case SCMI_DENIED:
27 return -EACCES;
28 case SCMI_NOT_FOUND:
29 return -ENOENT;
30 case SCMI_OUT_OF_RANGE:
31 return -ERANGE;
32 case SCMI_IN_USE:
33 case SCMI_BUSY:
34 return -EBUSY;
35 case SCMI_PROTOCOL_ERROR:
36 return -EPROTO;
37 case SCMI_COMMS_ERROR:
38 case SCMI_GENERIC_ERROR:
39 case SCMI_HARDWARE_ERROR:
40 default:
41 return -EIO;
42 }
43 }
44
scmi_core_reply_cb(struct scmi_channel * chan)45 static void scmi_core_reply_cb(struct scmi_channel *chan)
46 {
47 if (!k_is_pre_kernel()) {
48 k_sem_give(&chan->sem);
49 }
50 }
51
/**
 * @brief Perform one-time initialization of an SCMI channel.
 *
 * Initializes the channel lock, reply semaphore and callback, then lets the
 * transport layer do its own per-channel setup. Safe to call multiple times
 * on the same channel: already-initialized channels are skipped.
 *
 * @param transport transport layer device
 * @param chan channel to initialize
 * @param tx true for a TX (agent-to-platform) channel
 *
 * @return 0 on success, negative errno on failure
 */
static int scmi_core_setup_chan(const struct device *transport,
				struct scmi_channel *chan, bool tx)
{
	int ret;

	if (!chan) {
		return -EINVAL;
	}

	/* protocols might share a channel. In such cases, this will stop
	 * it from being initialized again.
	 */
	if (chan->ready) {
		return 0;
	}

	/* no support for RX channels ATM */
	if (!tx) {
		return -ENOTSUP;
	}

	k_mutex_init(&chan->lock);
	k_sem_init(&chan->sem, 0, 1);
	chan->cb = scmi_core_reply_cb;

	/* transport-specific channel setup (shmem, doorbells, ...) */
	ret = scmi_transport_setup_chan(transport, chan, tx);
	if (ret < 0) {
		LOG_ERR("failed to setup channel");
		return ret;
	}

	chan->ready = true;

	return 0;
}
89
/**
 * @brief Enable or disable the SCMI completion interrupt on a channel.
 *
 * Updates the channel's shared-memory flags so the platform knows whether
 * to raise the completion interrupt, and enables/disables the corresponding
 * mailbox channel on the agent side.
 *
 * NOTE(review): assumes chan->data points to a struct scmi_mbox_channel,
 * i.e. this path is mailbox-transport specific — confirm against the
 * transport binding.
 *
 * @param chan channel to update
 * @param enable true to enable interrupt-driven completion, false for polling
 *
 * @return 0 on success, negative errno on failure
 */
static int scmi_interrupt_enable(struct scmi_channel *chan, bool enable)
{
	struct scmi_mbox_channel *mbox_chan;
	struct mbox_dt_spec *tx_reply;
	uint32_t comp_int;

	mbox_chan = chan->data;

	/* Fix: comp_int was previously declared as bool, which truncates the
	 * flag to 0/1 and is only correct when SCMI_SHMEM_CHAN_FLAG_IRQ_BIT
	 * happens to be bit 0. Keep it as a plain integer flag value.
	 */
	comp_int = enable ? SCMI_SHMEM_CHAN_FLAG_IRQ_BIT : 0;

	/* prefer the dedicated reply channel when the DT provides one */
	if (mbox_chan->tx_reply.dev) {
		tx_reply = &mbox_chan->tx_reply;
	} else {
		tx_reply = &mbox_chan->tx;
	}

	/* re-set completion interrupt */
	scmi_shmem_update_flags(mbox_chan->shmem, SCMI_SHMEM_CHAN_FLAG_IRQ_BIT, comp_int);

	return mbox_set_enabled_dt(tx_reply, enable);
}
110
scmi_send_message_polling(struct scmi_protocol * proto,struct scmi_message * msg,struct scmi_message * reply)111 static int scmi_send_message_polling(struct scmi_protocol *proto,
112 struct scmi_message *msg,
113 struct scmi_message *reply)
114 {
115 int ret;
116 int status;
117
118 /*
119 * SCMI communication interrupt is enabled by default during setup_chan
120 * to support interrupt-driven communication. When using polling mode
121 * it must be disabled to avoid unnecessary interrupts and
122 * ensure proper polling behavior.
123 */
124 status = scmi_interrupt_enable(proto->tx, false);
125
126 ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
127 if (ret < 0) {
128 goto cleanup;
129 }
130
131 /* no kernel primitives, we're forced to poll here.
132 *
133 * Cortex-M quirk: no interrupts at this point => no timer =>
134 * no timeout mechanism => this can block the whole system.
135 *
136 * Polling mode repeatedly checks the chan_status field in share memory
137 * to detect whether the remote side have completed message processing
138 *
139 * TODO: is there a better way to handle this?
140 */
141 while (!scmi_transport_channel_is_free(proto->transport, proto->tx)) {
142 }
143
144 ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
145 if (ret < 0) {
146 return ret;
147 }
148
149 cleanup:
150 /* restore scmi interrupt enable status when disable it pass */
151 if (status >= 0) {
152 scmi_interrupt_enable(proto->tx, true);
153 }
154
155 return ret;
156 }
157
scmi_send_message_interrupt(struct scmi_protocol * proto,struct scmi_message * msg,struct scmi_message * reply)158 static int scmi_send_message_interrupt(struct scmi_protocol *proto,
159 struct scmi_message *msg,
160 struct scmi_message *reply)
161 {
162 int ret = 0;
163
164 if (!proto->tx) {
165 return -ENODEV;
166 }
167
168 /* wait for channel to be free */
169 ret = k_mutex_lock(&proto->tx->lock, K_USEC(SCMI_CHAN_LOCK_TIMEOUT_USEC));
170 if (ret < 0) {
171 LOG_ERR("failed to acquire chan lock");
172 return ret;
173 }
174
175 ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
176 if (ret < 0) {
177 LOG_ERR("failed to send message");
178 goto out_release_mutex;
179 }
180
181 /* only one protocol instance can wait for a message reply at a time */
182 ret = k_sem_take(&proto->tx->sem, K_USEC(CONFIG_ARM_SCMI_CHAN_SEM_TIMEOUT_USEC));
183 if (ret < 0) {
184 LOG_ERR("failed to wait for msg reply");
185 goto out_release_mutex;
186 }
187
188 ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
189 if (ret < 0) {
190 LOG_ERR("failed to read reply");
191 goto out_release_mutex;
192 }
193
194 out_release_mutex:
195 k_mutex_unlock(&proto->tx->lock);
196
197 return ret;
198 }
199
/**
 * @brief Send an SCMI message and wait for its reply.
 *
 * Dispatches to the polling or interrupt-driven implementation depending on
 * @p use_polling. The channel must have been set up beforehand.
 *
 * @param proto protocol owning the TX channel
 * @param msg message to send
 * @param reply buffer for the reply message
 * @param use_polling true to busy-wait instead of sleeping on the reply
 *
 * @return 0 on success, negative errno on failure
 */
int scmi_send_message(struct scmi_protocol *proto, struct scmi_message *msg,
		      struct scmi_message *reply, bool use_polling)
{
	struct scmi_channel *tx = proto->tx;

	if (!tx) {
		return -ENODEV;
	}

	if (!tx->ready) {
		return -EINVAL;
	}

	return use_polling ? scmi_send_message_polling(proto, msg, reply)
			   : scmi_send_message_interrupt(proto, msg, reply);
}
217
scmi_core_protocol_negotiate(struct scmi_protocol * proto)218 static int scmi_core_protocol_negotiate(struct scmi_protocol *proto)
219
220 {
221 uint32_t agent_version, platform_version;
222 int ret;
223
224 if (!proto) {
225 return -EINVAL;
226 }
227
228 agent_version = proto->version;
229
230 if (!agent_version) {
231 LOG_ERR("Protocol 0x%X: Agent version not specified", proto->id);
232 return -EINVAL;
233 }
234
235 ret = scmi_protocol_get_version(proto, &platform_version);
236 if (ret < 0) {
237 LOG_ERR("Protocol 0x%X: Failed to get platform version: %d",
238 proto->id, ret);
239 return ret;
240 }
241
242 if (platform_version > agent_version) {
243 ret = scmi_protocol_version_negotiate(proto, agent_version);
244 if (ret < 0) {
245 LOG_WRN("Protocol 0x%X: Negotiation failed (%d). "
246 "Platform v0x%08x does not support downgrade to agent v0x%08x",
247 proto->id, ret, platform_version, agent_version);
248 }
249 }
250
251 LOG_INF("Using protocol 0x%X: agent version 0x%08x, platform version 0x%08x",
252 proto->id, agent_version, platform_version);
253
254 return 0;
255 }
256
scmi_core_protocol_setup(const struct device * transport)257 static int scmi_core_protocol_setup(const struct device *transport)
258 {
259 int ret;
260
261 STRUCT_SECTION_FOREACH(scmi_protocol, it) {
262 it->transport = transport;
263
264 #ifndef CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS
265 /* no static channel allocation, attempt dynamic binding */
266 it->tx = scmi_transport_request_channel(transport, it->id, true);
267 #endif /* CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS */
268
269 if (!it->tx) {
270 return -ENODEV;
271 }
272
273 ret = scmi_core_setup_chan(transport, it->tx, true);
274 if (ret < 0) {
275 return ret;
276 }
277
278 ret = scmi_core_protocol_negotiate(it);
279 if (ret < 0) {
280 return ret;
281 }
282
283 }
284
285 return 0;
286 }
287
/**
 * @brief Initialize the SCMI core over the given transport.
 *
 * First initializes the transport layer itself, then performs per-protocol
 * channel setup and version negotiation.
 *
 * @param transport transport layer device
 *
 * @return 0 on success, negative errno on failure
 */
int scmi_core_transport_init(const struct device *transport)
{
	int ret = scmi_transport_init(transport);

	if (ret < 0) {
		return ret;
	}

	return scmi_core_protocol_setup(transport);
}
299