1 /*
2  * Copyright (c) 2023 Google LLC
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #include <string.h>
8 #include <zephyr/drivers/uart.h>
9 #include <zephyr/kernel.h>
10 #include <zephyr/logging/log.h>
11 #include <zephyr/mgmt/ec_host_cmd/backend.h>
12 #include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
13 #include <zephyr/sys/printk.h>
14 #include <zephyr/sys/time_units.h>
15 #include <zephyr/sys/util.h>
16 #include <zephyr/types.h>
17 
18 LOG_MODULE_REGISTER(host_cmd_uart, CONFIG_EC_HC_LOG_LEVEL);
19 
20 /* TODO: Try to use circular mode once it is supported and compare timings */
21 
/* States of the UART host command receive/transmit state machine. */
enum uart_host_command_state {
	/*
	 * UART host command handler not enabled.
	 */
	UART_HOST_CMD_STATE_DISABLED,

	/*
	 * The UART layer is initialized and ready to receive a host
	 * request. Once a response is sent, the current state is
	 * reset to this state to accept the next packet.
	 */
	UART_HOST_CMD_READY_TO_RX,

	/*
	 * After the first byte is received the current state is moved to the
	 * receiving state until all the header bytes + data_len bytes are
	 * received. If the rx timeout fires in this state, it is
	 * because of an underrun situation (host sent too few bytes).
	 */
	UART_HOST_CMD_RECEIVING,

	/*
	 * Once the handler starts processing the rx buffer,
	 * the current state is moved to the processing state. The host should
	 * not send any bytes in this state as they would be considered a
	 * contiguous request.
	 */
	UART_HOST_CMD_PROCESSING,

	/*
	 * Once the host command task is ready with the response bytes, the
	 * current state is moved to the sending state.
	 */
	UART_HOST_CMD_SENDING,

	/*
	 * If a bad packet header is received, the current state is moved to
	 * this state and after a timeout all received bytes are dropped.
	 */
	UART_HOST_CMD_RX_BAD,

	/*
	 * If extra bytes are received while the host command is being
	 * processed, the host is sending extra bytes which indicates a data
	 * overrun.
	 */
	UART_HOST_CMD_RX_OVERRUN,
};
69 
/* Per-backend context binding the UART device to the host command buffers. */
struct ec_host_cmd_uart_ctx {
	/* UART device used for the host command transport. */
	const struct device *uart_dev;
	/* Receive context (buffer + length) provided by the handler at init. */
	struct ec_host_cmd_rx_ctx *rx_ctx;
	/* Capacity of rx_ctx->buf; fixed at define time from Kconfig. */
	const size_t rx_buf_size;
	/* Transmit buffer provided by the handler at init. */
	struct ec_host_cmd_tx_buf *tx_buf;
	/* Delayable work used to detect rx underrun/overrun/bad packets. */
	struct k_work_delayable timeout_work;
	/* Current state of the receive/transmit state machine. */
	enum uart_host_command_state state;
};
78 
request_expected_size(const struct ec_host_cmd_request_header * r)79 static int request_expected_size(const struct ec_host_cmd_request_header *r)
80 {
81 	/* Check host request version */
82 	if (r->prtcl_ver != 3) {
83 		return 0;
84 	}
85 
86 	/* Reserved byte should be 0 */
87 	if (r->reserved) {
88 		return 0;
89 	}
90 
91 	return sizeof(*r) + r->data_len;
92 }
93 
/* Instantiate a UART host command backend named _name: a static context
 * struct plus the backend struct wired to the shared ec_host_cmd_api.
 */
#define EC_HOST_CMD_UART_DEFINE(_name)                                                             \
	static struct ec_host_cmd_uart_ctx _name##_hc_uart = {                                     \
		.rx_buf_size = CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE,                          \
	};                                                                                         \
	static struct ec_host_cmd_backend _name = {                                                \
		.api = &ec_host_cmd_api,                                                           \
		.ctx = &_name##_hc_uart,                                                           \
	}

/* Timeout after receiving the first byte of a request; if a complete packet
 * has not arrived by then, rx_timeout() drops the data and re-arms reception.
 */
#define UART_REQ_RX_TIMEOUT K_MSEC(150)

/*
 * Max data size for a version 3 request/response packet. This is big enough
 * to handle a request/response header, flash write offset/size and 512 bytes
 * of request payload or 224 bytes of response payload.
 */
#define UART_MAX_REQ_SIZE  0x220
#define UART_MAX_RESP_SIZE 0x100
113 
rx_timeout(struct k_work * work)114 static void rx_timeout(struct k_work *work)
115 {
116 	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
117 	struct ec_host_cmd_uart_ctx *hc_uart =
118 		CONTAINER_OF(dwork, struct ec_host_cmd_uart_ctx, timeout_work);
119 	int res;
120 
121 	switch (hc_uart->state) {
122 	case UART_HOST_CMD_RECEIVING:
123 		/* If state is receiving then timeout was hit due to underrun */
124 		LOG_ERR("Request underrun detected");
125 		break;
126 	case UART_HOST_CMD_RX_OVERRUN:
127 		/* If state is rx_overrun then timeout was hit because
128 		 * process request was cancelled and extra rx bytes were
129 		 * dropped
130 		 */
131 		LOG_ERR("Request overrun detected");
132 		break;
133 	case UART_HOST_CMD_RX_BAD:
134 		/* If state is rx_bad then packet header was bad and process
135 		 * request was cancelled to drop all incoming bytes.
136 		 */
137 		LOG_ERR("Bad packet header detected");
138 		break;
139 	default:
140 		LOG_ERR("Request timeout mishandled, state: %d", hc_uart->state);
141 	}
142 
143 	res = uart_rx_disable(hc_uart->uart_dev);
144 	res = uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);
145 
146 	hc_uart->state = UART_HOST_CMD_READY_TO_RX;
147 }
148 
/* UART asynchronous API event handler; drives the receive state machine.
 *
 * On UART_RX_RDY it accumulates request bytes into the rx buffer, validates
 * the header once enough bytes have arrived and, for a complete packet,
 * disables reception and notifies the host command handler. On UART_TX_DONE
 * it returns the state machine to ready-to-rx.
 *
 * @param dev       UART device that raised the event (unused directly).
 * @param evt       Event descriptor from the UART driver.
 * @param user_data Backend context registered via uart_callback_set().
 */
static void uart_callback(const struct device *dev, struct uart_event *evt, void *user_data)
{
	struct ec_host_cmd_uart_ctx *hc_uart = user_data;
	size_t new_len;

	switch (evt->type) {
	case UART_RX_RDY:
		if (hc_uart->state == UART_HOST_CMD_READY_TO_RX) {
			/* First chunk of a new request: reset length, start the
			 * timeout that guards against an incomplete request.
			 */
			hc_uart->rx_ctx->len = 0;
			hc_uart->state = UART_HOST_CMD_RECEIVING;
			k_work_reschedule(&hc_uart->timeout_work, UART_REQ_RX_TIMEOUT);
		} else if (hc_uart->state == UART_HOST_CMD_PROCESSING ||
			   hc_uart->state == UART_HOST_CMD_SENDING) {
			LOG_ERR("UART HOST CMD ERROR: Received data while processing or sending");
			return;
		} else if (hc_uart->state == UART_HOST_CMD_RX_BAD ||
			   hc_uart->state == UART_HOST_CMD_RX_OVERRUN) {
			/* Wait for timeout if an error has been detected */
			return;
		}

		__ASSERT(hc_uart->state == UART_HOST_CMD_RECEIVING,
			 "UART Host Command state mishandled, state: %d", hc_uart->state);

		new_len = hc_uart->rx_ctx->len + evt->data.rx.len;

		if (new_len > hc_uart->rx_buf_size) {
			/* Bad data error, set the state and wait for timeout */
			hc_uart->state = UART_HOST_CMD_RX_BAD;
			return;
		}

		hc_uart->rx_ctx->len = new_len;

		if (hc_uart->rx_ctx->len >= sizeof(struct ec_host_cmd_request_header)) {
			/* Buffer has request header. Check header and get data_len */
			size_t expected_len = request_expected_size(
				(struct ec_host_cmd_request_header *)hc_uart->rx_ctx->buf);

			if (expected_len == 0 || expected_len > hc_uart->rx_buf_size) {
				/* Invalid expected size, set the state and wait for timeout */
				hc_uart->state = UART_HOST_CMD_RX_BAD;
			} else if (hc_uart->rx_ctx->len == expected_len) {
				/* Don't wait for overrun, because it is already done
				 * in a UART driver.
				 */
				(void)k_work_cancel_delayable(&hc_uart->timeout_work);

				/* Disable receiving to prevent overwriting the rx buffer while
				 * processing. Enabling receiving to a temporary buffer to detect
				 * unexpected transfer while processing increases average handling
				 * time ~40% so don't do that.
				 */
				uart_rx_disable(hc_uart->uart_dev);

				/* If no data more in request, packet is complete. Start processing
				 */
				hc_uart->state = UART_HOST_CMD_PROCESSING;

				ec_host_cmd_rx_notify();
			} else if (hc_uart->rx_ctx->len > expected_len) {
				/* Overrun error, set the state and wait for timeout */
				hc_uart->state = UART_HOST_CMD_RX_OVERRUN;
			}
		}
		break;
	case UART_RX_BUF_REQUEST:
		/* Do not provide the second buffer, because we reload DMA after every packet. */
		break;
	case UART_TX_DONE:
		if (hc_uart->state != UART_HOST_CMD_SENDING) {
			LOG_ERR("UART HOST CMD ERROR: unexpected end of sending");
		}
		/* Receiving is already enabled in the send function. */
		hc_uart->state = UART_HOST_CMD_READY_TO_RX;
		break;
	case UART_RX_STOPPED:
		LOG_ERR("UART HOST CMD ERROR: Receiving data stopped");
		break;
	default:
		break;
	}
}
232 
/* Backend init callback: bind the handler's rx/tx buffers, clamp their
 * maximum sizes to the protocol limits, register the UART async callback
 * and start reception.
 *
 * @param backend Backend instance whose ctx holds the UART device.
 * @param rx_ctx  Receive context (buffer must be pre-allocated).
 * @param tx      Transmit buffer (must be pre-allocated).
 *
 * @return 0 on success, -ENODEV if the UART device is not ready,
 *         -EIO if a buffer is missing, otherwise the uart_rx_enable()
 *         error code.
 */
static int ec_host_cmd_uart_init(const struct ec_host_cmd_backend *backend,
				 struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
{
	int ret;
	struct ec_host_cmd_uart_ctx *hc_uart = backend->ctx;

	hc_uart->state = UART_HOST_CMD_STATE_DISABLED;

	if (!device_is_ready(hc_uart->uart_dev)) {
		return -ENODEV;
	}

	/* UART backend needs rx and tx buffers provided by the handler */
	if (!rx_ctx->buf || !tx->buf) {
		return -EIO;
	}

	hc_uart->rx_ctx = rx_ctx;
	hc_uart->tx_buf = tx;

	/* Limit the request/response max sizes */
	if (hc_uart->rx_ctx->len_max > UART_MAX_REQ_SIZE) {
		hc_uart->rx_ctx->len_max = UART_MAX_REQ_SIZE;
	}
	if (hc_uart->tx_buf->len_max > UART_MAX_RESP_SIZE) {
		hc_uart->tx_buf->len_max = UART_MAX_RESP_SIZE;
	}

	k_work_init_delayable(&hc_uart->timeout_work, rx_timeout);
	uart_callback_set(hc_uart->uart_dev, uart_callback, hc_uart);
	ret = uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);

	hc_uart->state = UART_HOST_CMD_READY_TO_RX;

	return ret;
}
269 
ec_host_cmd_uart_send(const struct ec_host_cmd_backend * backend)270 static int ec_host_cmd_uart_send(const struct ec_host_cmd_backend *backend)
271 {
272 	struct ec_host_cmd_uart_ctx *hc_uart = backend->ctx;
273 	int ret;
274 
275 	if (hc_uart->state != UART_HOST_CMD_PROCESSING) {
276 		LOG_ERR("UART HOST CMD ERROR: unexpected state while sending");
277 	}
278 
279 	/* The state is changed to UART_HOST_CMD_READY_TO_RX in the UART_TX_DONE event */
280 	hc_uart->state = UART_HOST_CMD_SENDING;
281 
282 	/* The rx buffer is no longer in use by command handler.
283 	 * Enable receiving to be ready to get a new command right after sending the response.
284 	 */
285 	uart_rx_enable(hc_uart->uart_dev, hc_uart->rx_ctx->buf, hc_uart->rx_buf_size, 0);
286 
287 	/* uart_tx is non-blocking asynchronous function.
288 	 * The state is changed to UART_HOST_CMD_READY_TO_RX in the UART_TX_DONE event.
289 	 */
290 	ret = uart_tx(hc_uart->uart_dev, hc_uart->tx_buf->buf, hc_uart->tx_buf->len,
291 		      SYS_FOREVER_US);
292 
293 	/* If sending fails, reset the state */
294 	if (ret) {
295 		hc_uart->state = UART_HOST_CMD_READY_TO_RX;
296 		LOG_ERR("UART HOST CMD ERROR: sending failed");
297 	}
298 
299 	return ret;
300 }
301 
/* Backend API hooks registered with the host command subsystem. */
static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
	.init = ec_host_cmd_uart_init,
	.send = ec_host_cmd_uart_send,
};
306 
EC_HOST_CMD_UART_DEFINE(ec_host_cmd_uart);

/* Bind the given UART device to the (single) backend instance and return
 * the backend for use with ec_host_cmd_init().
 *
 * @param dev UART device to use as the host command transport.
 *
 * @return Pointer to the UART host command backend.
 */
struct ec_host_cmd_backend *ec_host_cmd_backend_get_uart(const struct device *dev)
{
	struct ec_host_cmd_uart_ctx *hc_uart = ec_host_cmd_uart.ctx;

	hc_uart->uart_dev = dev;
	return &ec_host_cmd_uart;
}
315 
#if DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_uart_backend)) &&                                     \
	defined(CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT)
/* Boot-time initializer: start the host command subsystem on the UART chosen
 * via the zephyr,host-cmd-uart-backend devicetree node. Registered only when
 * auto-initialization is enabled in Kconfig.
 */
static int host_cmd_init(void)
{
	const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_host_cmd_uart_backend));

	ec_host_cmd_init(ec_host_cmd_backend_get_uart(dev));
	return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);
#endif
327