/*
 * Copyright (c) 2023 Google LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/kernel.h>
#include <zephyr/logging/log.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys/time_units.h>
#include <zephyr/sys/util.h>
#include <zephyr/types.h>

LOG_MODULE_REGISTER(host_cmd_uart, CONFIG_EC_HC_LOG_LEVEL);

/* TODO: Try to use circular mode once it is supported and compare timings */

enum uart_host_command_state {
        /*
         * UART host command handler not enabled.
         */
        UART_HOST_CMD_STATE_DISABLED,

        /*
         * The UART layer is initialized and ready to receive a host request.
         * Once the response is sent, the state is reset to this one to accept
         * the next packet.
         */
        UART_HOST_CMD_READY_TO_RX,

        /*
         * After the first byte is received, the state moves to receiving until
         * all the header bytes + data_len bytes are received. If the rx
         * timeout expires in this state, it is because of an underrun
         * situation.
         */
        UART_HOST_CMD_RECEIVING,

        /*
         * Once process_request starts processing the rx buffer, the state
         * moves to processing. The host should not send any bytes in this
         * state, as they would be considered a contiguous request.
         */
        UART_HOST_CMD_PROCESSING,

        /*
         * Once the host command handler is ready with the response bytes, the
         * state moves to sending.
         */
        UART_HOST_CMD_SENDING,

        /*
         * If a bad packet header is received, the state moves to rx_bad and
         * after the timeout all received bytes are dropped.
         */
        UART_HOST_CMD_RX_BAD,

        /*
         * If extra bytes are received while the host command is being
         * processed, the host is sending too many bytes, which indicates a
         * data overrun.
         */
        UART_HOST_CMD_RX_OVERRUN,
};

static const char * const state_name[] = {
        [UART_HOST_CMD_STATE_DISABLED] = "DISABLED",
        [UART_HOST_CMD_READY_TO_RX] = "READY_TO_RX",
        [UART_HOST_CMD_RECEIVING] = "RECEIVING",
        [UART_HOST_CMD_PROCESSING] = "PROCESSING",
        [UART_HOST_CMD_SENDING] = "SENDING",
        [UART_HOST_CMD_RX_BAD] = "RX_BAD",
        [UART_HOST_CMD_RX_OVERRUN] = "RX_OVERRUN",
};

struct ec_host_cmd_uart_ctx {
        const struct device *uart_dev;
        struct ec_host_cmd_rx_ctx *rx_ctx;
        const size_t rx_buf_size;
        struct ec_host_cmd_tx_buf *tx_buf;
        struct k_work_delayable timeout_work;
        enum uart_host_command_state state;
};

static int request_expected_size(const struct ec_host_cmd_request_header *r)
{
        /* Check host request version */
        if (r->prtcl_ver != 3) {
                return 0;
        }

        /* Reserved byte should be 0 */
        if (r->reserved) {
                return 0;
        }

        return sizeof(*r) + r->data_len;
}

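/*
 * Define a UART host command backend instance: the backend context bound to
 * the backend API. The rx buffer size comes from the common host command
 * handler Kconfig option.
 */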
#define EC_HOST_CMD_UART_DEFINE(_name)                                         \
        static struct ec_host_cmd_uart_ctx _name##_hc_uart = {                 \
                .rx_buf_size = CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE,      \
        };                                                                     \
        static struct ec_host_cmd_backend _name = {                            \
                .api = &ec_host_cmd_api,                                       \
                .ctx = &_name##_hc_uart,                                       \
        }

/*
 * Max data size for a version 3 request/response packet. This is big enough
 * to handle a request/response header, flash write offset/size and 512 bytes
 * of request payload or 224 bytes of response payload.
 */
#define UART_MAX_REQ_SIZE  0x220
#define UART_MAX_RESP_SIZE 0x100

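/*
 * Delayed-work handler run when a request does not complete within
 * CONFIG_EC_HOST_CMD_BACKEND_UART_TIMEOUT or when an rx error state was
 * entered: it logs the failing state, restarts reception from the beginning
 * of the rx buffer and returns the backend to the READY_TO_RX state.
 */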
static void rx_timeout(struct k_work *work)
{
        struct k_work_delayable *dwork = k_work_delayable_from_work(work);
        struct ec_host_cmd_uart_ctx *hc_uart =
                CONTAINER_OF(dwork, struct ec_host_cmd_uart_ctx, timeout_work);

        LOG_ERR("Request error in state: %s", state_name[hc_uart->state]);

        uart_rx_disable(hc_uart->uart_dev);
        uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);

        hc_uart->state = UART_HOST_CMD_READY_TO_RX;
}

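/*
 * Asynchronous UART API event handler. It drives the receive state machine:
 * it accumulates request bytes on UART_RX_RDY, hands a complete request to
 * the host command handler, and returns to READY_TO_RX once the response has
 * been transmitted (UART_TX_DONE).
 */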
static void uart_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
        struct ec_host_cmd_uart_ctx *hc_uart = user_data;
        size_t new_len;

        switch (evt->type) {
        case UART_RX_RDY:
                if (hc_uart->state == UART_HOST_CMD_READY_TO_RX) {
                        hc_uart->rx_ctx->len = 0;
                        hc_uart->state = UART_HOST_CMD_RECEIVING;
                        k_work_reschedule(&hc_uart->timeout_work,
                                          K_MSEC(CONFIG_EC_HOST_CMD_BACKEND_UART_TIMEOUT));
                } else if (hc_uart->state == UART_HOST_CMD_PROCESSING ||
                           hc_uart->state == UART_HOST_CMD_SENDING) {
                        LOG_ERR("Received data while in state: %s", state_name[hc_uart->state]);
                        return;
                } else if (hc_uart->state == UART_HOST_CMD_RX_BAD ||
                           hc_uart->state == UART_HOST_CMD_RX_OVERRUN) {
                        /* Wait for the timeout if an error has been detected */
                        return;
                }

                __ASSERT(hc_uart->state == UART_HOST_CMD_RECEIVING,
                         "UART Host Command state mishandled, state: %d", hc_uart->state);

                new_len = hc_uart->rx_ctx->len + evt->data.rx.len;

                if (new_len > hc_uart->rx_buf_size) {
                        /* Bad data error, set the state and wait for the timeout */
                        hc_uart->state = UART_HOST_CMD_RX_BAD;
                        return;
                }

                hc_uart->rx_ctx->len = new_len;

                if (hc_uart->rx_ctx->len >= sizeof(struct ec_host_cmd_request_header)) {
                        /* Buffer has the request header. Check the header and get data_len */
                        size_t expected_len = request_expected_size(
                                (struct ec_host_cmd_request_header *)hc_uart->rx_ctx->buf);

                        if (expected_len == 0 || expected_len > hc_uart->rx_buf_size) {
                                /* Invalid expected size, set the state and wait for the timeout */
                                hc_uart->state = UART_HOST_CMD_RX_BAD;
                        } else if (hc_uart->rx_ctx->len == expected_len) {
                                /* Don't wait for an overrun, because it is already handled
                                 * in the UART driver.
                                 */
                                (void)k_work_cancel_delayable(&hc_uart->timeout_work);

                                /* Disable receiving to prevent overwriting the rx buffer while
                                 * processing. Enabling receiving to a temporary buffer to detect
                                 * unexpected transfers while processing increases the average
                                 * handling time by ~40%, so don't do that.
                                 */
                                uart_rx_disable(hc_uart->uart_dev);

                                /* No more data is expected, the packet is complete.
                                 * Start processing.
                                 */
                                hc_uart->state = UART_HOST_CMD_PROCESSING;

                                ec_host_cmd_rx_notify();
                        } else if (hc_uart->rx_ctx->len > expected_len) {
                                /* Overrun error, set the state and wait for the timeout */
                                hc_uart->state = UART_HOST_CMD_RX_OVERRUN;
                        }
                }
                break;
        case UART_RX_BUF_REQUEST:
                /* Do not provide a second buffer, because we reload DMA after every packet. */
                break;
        case UART_TX_DONE:
                if (hc_uart->state != UART_HOST_CMD_SENDING) {
                        LOG_ERR("Unexpected end of sending");
                }
                /* Receiving is already enabled in the send function. */
                hc_uart->state = UART_HOST_CMD_READY_TO_RX;
                break;
        case UART_RX_STOPPED:
                LOG_ERR("Receiving data stopped");
                break;
        default:
                break;
        }
}

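/*
 * Backend init op. Called by the host command subsystem with the shared rx/tx
 * buffers; it validates the UART device, clamps the request/response sizes to
 * what this backend supports and arms the first reception.
 */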
static int ec_host_cmd_uart_init(const struct ec_host_cmd_backend *backend,
                                 struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
{
        int ret;
        struct ec_host_cmd_uart_ctx *hc_uart = backend->ctx;

        hc_uart->state = UART_HOST_CMD_STATE_DISABLED;

        if (!device_is_ready(hc_uart->uart_dev)) {
                return -ENODEV;
        }

        /* The UART backend needs rx and tx buffers provided by the handler */
        if (!rx_ctx->buf || !tx->buf) {
                return -EIO;
        }

        hc_uart->rx_ctx = rx_ctx;
        hc_uart->tx_buf = tx;

        /* Limit the request/response max sizes */
        if (hc_uart->rx_ctx->len_max > UART_MAX_REQ_SIZE) {
                hc_uart->rx_ctx->len_max = UART_MAX_REQ_SIZE;
        }
        if (hc_uart->tx_buf->len_max > UART_MAX_RESP_SIZE) {
                hc_uart->tx_buf->len_max = UART_MAX_RESP_SIZE;
        }

        k_work_init_delayable(&hc_uart->timeout_work, rx_timeout);
        uart_callback_set(hc_uart->uart_dev, uart_callback, hc_uart);
        ret = uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);

        hc_uart->state = UART_HOST_CMD_READY_TO_RX;

        return ret;
}

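/*
 * Backend send op. Re-arms reception (the rx buffer is no longer needed by
 * the handler) and transmits the prepared response asynchronously; the state
 * machine returns to READY_TO_RX from the UART_TX_DONE event.
 */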
static int ec_host_cmd_uart_send(const struct ec_host_cmd_backend *backend)
{
        struct ec_host_cmd_uart_ctx *hc_uart = backend->ctx;
        int ret;

        if (hc_uart->state != UART_HOST_CMD_PROCESSING) {
                LOG_ERR("Unexpected state while sending");
        }

        hc_uart->state = UART_HOST_CMD_SENDING;

        /* The rx buffer is no longer in use by the command handler.
         * Enable receiving to be ready to get a new command right after sending the response.
         */
        uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);

        /* uart_tx is a non-blocking, asynchronous function.
         * The state is changed to UART_HOST_CMD_READY_TO_RX in the UART_TX_DONE event.
         */
        ret = uart_tx(hc_uart->uart_dev, hc_uart->tx_buf->buf, hc_uart->tx_buf->len,
                      SYS_FOREVER_US);

        /* If sending fails, reset the state */
        if (ret) {
                hc_uart->state = UART_HOST_CMD_READY_TO_RX;
                LOG_ERR("Sending failed");
        }

        return ret;
}

static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
        .init = ec_host_cmd_uart_init,
        .send = ec_host_cmd_uart_send,
};

EC_HOST_CMD_UART_DEFINE(ec_host_cmd_uart);
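
/*
 * Bind a UART device to the statically defined backend instance and return
 * it, so it can be passed to ec_host_cmd_init(). A minimal usage sketch (the
 * "uart1" node label is only an illustrative assumption):
 *
 *   const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(uart1));
 *
 *   ec_host_cmd_init(ec_host_cmd_backend_get_uart(dev));
 */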
struct ec_host_cmd_backend *ec_host_cmd_backend_get_uart(const struct device *dev)
{
        struct ec_host_cmd_uart_ctx *hc_uart = ec_host_cmd_uart.ctx;

        hc_uart->uart_dev = dev;
        return &ec_host_cmd_uart;
}

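/*
 * When CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT is enabled, the backend is set
 * up automatically from the devicetree chosen node. A sketch of the expected
 * devicetree snippet, assuming the chosen property is spelled
 * "zephyr,host-cmd-uart-backend" and "&uart1" is a placeholder node label:
 *
 *   chosen {
 *           zephyr,host-cmd-uart-backend = &uart1;
 *   };
 */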
#if DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_uart_backend)) &&                 \
        defined(CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT)
static int host_cmd_init(void)
{
        const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_host_cmd_uart_backend));

        ec_host_cmd_init(ec_host_cmd_backend_get_uart(dev));
        return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);
#endif