/*
 * Copyright 2024 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/firmware/scmi/protocol.h>
#include <zephyr/drivers/firmware/scmi/transport.h>
#include <zephyr/logging/log.h>
#include <zephyr/device.h>

LOG_MODULE_REGISTER(scmi_core);

#define SCMI_CHAN_LOCK_TIMEOUT_USEC 500
#define SCMI_CHAN_SEM_TIMEOUT_USEC 500

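/* convert an SCMI status code into a standard negative errno value */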
int scmi_status_to_errno(int scmi_status)
{
	switch (scmi_status) {
	case SCMI_SUCCESS:
		return 0;
	case SCMI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SCMI_INVALID_PARAMETERS:
		return -EINVAL;
	case SCMI_DENIED:
		return -EACCES;
	case SCMI_NOT_FOUND:
		return -ENOENT;
	case SCMI_OUT_OF_RANGE:
		return -ERANGE;
	case SCMI_IN_USE:
	case SCMI_BUSY:
		return -EBUSY;
	case SCMI_PROTOCOL_ERROR:
		return -EPROTO;
	case SCMI_COMMS_ERROR:
	case SCMI_GENERIC_ERROR:
	case SCMI_HARDWARE_ERROR:
	default:
		return -EIO;
	}
}

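/* transport reply callback: wake up the thread blocked on the channel
 * semaphore. At pre-kernel stage replies are polled instead, so the
 * semaphore is not touched.
 */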
static void scmi_core_reply_cb(struct scmi_channel *chan)
{
	if (!k_is_pre_kernel()) {
		k_sem_give(&chan->sem);
	}
}

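/* prepare a channel for use: initialize its lock and semaphore, install
 * the reply callback and let the transport perform its own per-channel
 * setup. Channels already marked ready are left untouched.
 */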
static int scmi_core_setup_chan(const struct device *transport,
				struct scmi_channel *chan, bool tx)
{
	int ret;

	if (!chan) {
		return -EINVAL;
	}

	if (chan->ready) {
		return 0;
	}

	/* no support for RX channels ATM */
	if (!tx) {
		return -ENOTSUP;
	}

	k_mutex_init(&chan->lock);
	k_sem_init(&chan->sem, 0, 1);

	chan->cb = scmi_core_reply_cb;

	/* setup transport-related channel data */
	ret = scmi_transport_setup_chan(transport, chan, tx);
	if (ret < 0) {
		LOG_ERR("failed to setup channel");
		return ret;
	}

	/* protocols might share a channel. In such cases, this
	 * will stop them from being initialized again.
	 */
	chan->ready = true;

	return 0;
}

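/* pre-kernel send path: no kernel objects are usable yet, so after
 * sending we busy-wait until the transport reports the channel free,
 * then read back the reply.
 */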
static int scmi_send_message_pre_kernel(struct scmi_protocol *proto,
					struct scmi_message *msg,
					struct scmi_message *reply)
{
	int ret;

	ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
	if (ret < 0) {
		return ret;
	}

	/* no kernel primitives, we're forced to poll here.
	 *
	 * Cortex-M quirk: no interrupts at this point => no timer =>
	 * no timeout mechanism => this can block the whole system.
	 *
	 * TODO: is there a better way to handle this?
	 */
	while (!scmi_transport_channel_is_free(proto->transport, proto->tx)) {
	}

	ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
	if (ret < 0) {
		return ret;
	}

	return ret;
}

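/* post-kernel send path: serialize access to the TX channel via its
 * mutex, send the message and block on the channel semaphore until the
 * reply callback fires or the timeout expires.
 */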
static int scmi_send_message_post_kernel(struct scmi_protocol *proto,
					 struct scmi_message *msg,
					 struct scmi_message *reply)
{
	int ret = 0;

	if (!proto->tx) {
		return -ENODEV;
	}

	/* wait for channel to be free */
	ret = k_mutex_lock(&proto->tx->lock, K_USEC(SCMI_CHAN_LOCK_TIMEOUT_USEC));
	if (ret < 0) {
		LOG_ERR("failed to acquire chan lock");
		return ret;
	}

	ret = scmi_transport_send_message(proto->transport, proto->tx, msg);
	if (ret < 0) {
		LOG_ERR("failed to send message");
		goto out_release_mutex;
	}

	/* only one protocol instance can wait for a message reply at a time */
	ret = k_sem_take(&proto->tx->sem, K_USEC(SCMI_CHAN_SEM_TIMEOUT_USEC));
	if (ret < 0) {
		LOG_ERR("failed to wait for msg reply");
		goto out_release_mutex;
	}

	ret = scmi_transport_read_message(proto->transport, proto->tx, reply);
	if (ret < 0) {
		LOG_ERR("failed to read reply");
		goto out_release_mutex;
	}

out_release_mutex:
	k_mutex_unlock(&proto->tx->lock);

	return ret;
}

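/* send an SCMI message and wait for its reply, dispatching to the
 * polling-based pre-kernel path or the blocking post-kernel path
 * depending on the current boot stage.
 */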
int scmi_send_message(struct scmi_protocol *proto, struct scmi_message *msg,
		      struct scmi_message *reply)
{
	if (!proto->tx) {
		return -ENODEV;
	}

	if (!proto->tx->ready) {
		return -EINVAL;
	}

	if (k_is_pre_kernel()) {
		return scmi_send_message_pre_kernel(proto, msg, reply);
	} else {
		return scmi_send_message_post_kernel(proto, msg, reply);
	}
}

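/* bind every registered SCMI protocol to the given transport and set up
 * its TX channel. With static channel allocation the channel is expected
 * to have been assigned already; otherwise it is requested dynamically.
 */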
static int scmi_core_protocol_setup(const struct device *transport)
{
	int ret;

	STRUCT_SECTION_FOREACH(scmi_protocol, it) {
		it->transport = transport;

#ifndef CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS
		/* no static channel allocation, attempt dynamic binding */
		it->tx = scmi_transport_request_channel(transport, it->id, true);
#endif /* CONFIG_ARM_SCMI_TRANSPORT_HAS_STATIC_CHANNELS */

		if (!it->tx) {
			return -ENODEV;
		}

		ret = scmi_core_setup_chan(transport, it->tx, true);
		if (ret < 0) {
			return ret;
		}
	}

	return 0;
}

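/* transport driver entry point: initialize the transport itself, then
 * set up all registered protocols on top of it.
 */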
int scmi_core_transport_init(const struct device *transport)
{
	int ret;

	ret = scmi_transport_init(transport);
	if (ret < 0) {
		return ret;
	}

	return scmi_core_protocol_setup(transport);
}