/*
 * Copyright (c) 2020 Google LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/devicetree.h>
#include <zephyr/kernel.h>
#include <zephyr/linker/section_tags.h>
#include <zephyr/logging/log.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/sys/iterable_sections.h>
#include <stdio.h>
#include <string.h>

LOG_MODULE_REGISTER(host_cmd_handler, CONFIG_EC_HC_LOG_LEVEL);

#ifdef CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT
#define EC_HOST_CMD_CHOSEN_BACKEND_LIST \
        zephyr_host_cmd_espi_backend, zephyr_host_cmd_shi_backend, zephyr_host_cmd_uart_backend, \
        zephyr_host_cmd_spi_backend

#define EC_HOST_CMD_ADD_CHOSEN(chosen) COND_CODE_1(DT_NODE_EXISTS(DT_CHOSEN(chosen)), (1), (0))

#define NUMBER_OF_CHOSEN_BACKENDS \
        FOR_EACH(EC_HOST_CMD_ADD_CHOSEN, (+), EC_HOST_CMD_CHOSEN_BACKEND_LIST) \
        +0

BUILD_ASSERT(NUMBER_OF_CHOSEN_BACKENDS < 2, "Number of chosen backends > 1");
#endif
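
/*
 * Illustrative expansion of the check above, assuming only the eSPI backend
 * is selected in the devicetree (the node label and chosen property shown
 * here are examples; the property names follow from the identifiers in
 * EC_HOST_CMD_CHOSEN_BACKEND_LIST):
 *
 *        chosen {
 *                zephyr,host-cmd-espi-backend = &espi0;
 *        };
 *
 * NUMBER_OF_CHOSEN_BACKENDS then expands to 1 + 0 + 0 + 0 + 0 and the
 * BUILD_ASSERT passes; selecting two backends at once makes the sum 2 and
 * fails the build.
 */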

#define RX_HEADER_SIZE (sizeof(struct ec_host_cmd_request_header))
#define TX_HEADER_SIZE (sizeof(struct ec_host_cmd_response_header))

#ifdef CONFIG_EC_HOST_CMD_NOCACHE_BUFFERS
#define BUFFERS_CACHE_ATTR __nocache
#else
#define BUFFERS_CACHE_ATTR
#endif

COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF,
        (static uint8_t hc_rx_buffer[CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE]
                 __aligned(CONFIG_EC_HOST_CMD_HANDLER_BUFFER_ALIGN)
                 BUFFERS_CACHE_ATTR;), ())
COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF,
        (static uint8_t hc_tx_buffer[CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE]
                 __aligned(CONFIG_EC_HOST_CMD_HANDLER_BUFFER_ALIGN)
                 BUFFERS_CACHE_ATTR;), ())

#ifdef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
static K_KERNEL_STACK_DEFINE(hc_stack, CONFIG_EC_HOST_CMD_HANDLER_STACK_SIZE);
#endif /* CONFIG_EC_HOST_CMD_DEDICATED_THREAD */

static struct ec_host_cmd ec_host_cmd = {
        .rx_ctx = {
                .buf = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF, (hc_rx_buffer),
                                   (NULL)),
                .len_max = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF,
                                       (CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE), (0)),
        },
        .tx = {
                .buf = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF, (hc_tx_buffer),
                                   (NULL)),
                .len_max = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF,
                                       (CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE), (0)),
        },
};

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
/* Indicates that a command has sent EC_HOST_CMD_IN_PROGRESS but hasn't sent a final status */
static bool cmd_in_progress;

/* The final result of the last command that has sent EC_HOST_CMD_IN_PROGRESS */
static enum ec_host_cmd_status saved_status = EC_HOST_CMD_UNAVAILABLE;
static struct k_work work_in_progress;
static ec_host_cmd_in_progress_cb_t cb_in_progress;
static void *user_data_in_progress;
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
static uint16_t suppressed_cmds[CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER];
static uint16_t suppressed_cmds_count[CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER];
static int64_t suppressed_cmds_deadline = CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_INTERVAL_SECS * 1000U;
static size_t suppressed_cmds_number;
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

static uint8_t cal_checksum(const uint8_t *const buffer, const uint16_t size)
{
        uint8_t checksum = 0;

        for (size_t i = 0; i < size; ++i) {
                checksum += buffer[i];
        }
        return (uint8_t)(-checksum);
}
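
/*
 * Worked example of the checksum scheme: the value returned here makes all
 * bytes of a valid packet, including the checksum byte itself, sum to zero
 * modulo 256. For a hypothetical byte sequence { 0x03, 0x00, 0x42 } the sum
 * is 0x45, so cal_checksum() returns 0x100 - 0x45 = 0xBB; summing the packet
 * with 0xBB appended gives 0x100, i.e. 0x00 in eight bits. This is why
 * verify_rx() can validate a request simply by checking that cal_checksum()
 * over header plus data returns 0.
 */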

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
bool ec_host_cmd_send_in_progress_ended(void)
{
        return !cmd_in_progress;
}

enum ec_host_cmd_status ec_host_cmd_send_in_progress_status(void)
{
        enum ec_host_cmd_status ret = saved_status;

        saved_status = EC_HOST_CMD_UNAVAILABLE;

        return ret;
}

enum ec_host_cmd_status ec_host_cmd_send_in_progress_continue(ec_host_cmd_in_progress_cb_t cb,
                                                              void *user_data)
{
        if (cmd_in_progress) {
                return EC_HOST_CMD_BUSY;
        }

        cmd_in_progress = true;
        cb_in_progress = cb;
        user_data_in_progress = user_data;
        saved_status = EC_HOST_CMD_UNAVAILABLE;
        LOG_INF("HC pending");
        k_work_submit(&work_in_progress);

        return EC_HOST_CMD_SUCCESS;
}
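
/*
 * Usage sketch (hypothetical names): a handler whose work would exceed the
 * transport timeout can push the slow part to the system work queue and
 * answer immediately with EC_HOST_CMD_IN_PROGRESS; the host later retrieves
 * the saved result, which a follow-up handler can read with
 * ec_host_cmd_send_in_progress_status().
 *
 *        static enum ec_host_cmd_status slow_work(void *user_data)
 *        {
 *                // long-running operation, runs in the system work queue
 *                return EC_HOST_CMD_SUCCESS;
 *        }
 *
 *        static enum ec_host_cmd_status handle_slow_cmd(struct ec_host_cmd_handler_args *args)
 *        {
 *                if (ec_host_cmd_send_in_progress_continue(slow_work, NULL) !=
 *                    EC_HOST_CMD_SUCCESS) {
 *                        return EC_HOST_CMD_BUSY;
 *                }
 *
 *                return EC_HOST_CMD_IN_PROGRESS;
 *        }
 */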

static void handler_in_progress(struct k_work *work)
{
        if (cb_in_progress != NULL) {
                saved_status = cb_in_progress(user_data_in_progress);
                LOG_INF("HC pending done, result=%d", saved_status);
        } else {
                saved_status = EC_HOST_CMD_UNAVAILABLE;
                LOG_ERR("HC incorrect IN_PROGRESS callback");
        }
        cb_in_progress = NULL;
        cmd_in_progress = false;
}
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
int ec_host_cmd_add_suppressed(uint16_t cmd_id)
{
        if (suppressed_cmds_number >= CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER) {
                return -EIO;
        }

        suppressed_cmds[suppressed_cmds_number] = cmd_id;
        ++suppressed_cmds_number;

        return 0;
}
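
/*
 * Usage sketch: commands that the host polls frequently can be registered so
 * that each occurrence is only counted; the counters are printed periodically
 * by ec_host_cmd_dump_suppressed(). The command id below is hypothetical.
 *
 *        ec_host_cmd_add_suppressed(0x0123);
 */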

static bool ec_host_cmd_is_suppressed(uint16_t cmd_id)
{
        int i;

        for (i = 0; i < suppressed_cmds_number; i++) {
                if (suppressed_cmds[i] == cmd_id) {
                        suppressed_cmds_count[i]++;

                        return true;
                }
        }

        return false;
}

void ec_host_cmd_dump_suppressed(void)
{
        int i;
        int64_t uptime = k_uptime_get();

        LOG_PRINTK("[%llds HC Suppressed:", uptime / 1000U);
        for (i = 0; i < suppressed_cmds_number; i++) {
                LOG_PRINTK(" 0x%x=%d", suppressed_cmds[i], suppressed_cmds_count[i]);
                suppressed_cmds_count[i] = 0;
        }
        LOG_PRINTK("]\n");

        /* Reset the timer */
        suppressed_cmds_deadline = uptime + CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_INTERVAL_SECS * 1000U;
}

static void ec_host_cmd_check_suppressed(void)
{
        if (k_uptime_get() >= suppressed_cmds_deadline) {
                ec_host_cmd_dump_suppressed();
        }
}
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

static void send_status_response(const struct ec_host_cmd_backend *backend,
                                 struct ec_host_cmd_tx_buf *tx,
                                 const enum ec_host_cmd_status status)
{
        struct ec_host_cmd_response_header *const tx_header = (void *)tx->buf;

        tx_header->prtcl_ver = 3;
        tx_header->result = status;
        tx_header->data_len = 0;
        tx_header->reserved = 0;
        tx_header->checksum = 0;
        tx_header->checksum = cal_checksum((uint8_t *)tx_header, TX_HEADER_SIZE);

        tx->len = TX_HEADER_SIZE;

        backend->api->send(backend);
}

static enum ec_host_cmd_status verify_rx(struct ec_host_cmd_rx_ctx *rx)
{
        /* rx buf and len now have valid incoming data */
        if (rx->len < RX_HEADER_SIZE) {
                return EC_HOST_CMD_REQUEST_TRUNCATED;
        }

        const struct ec_host_cmd_request_header *rx_header =
                (struct ec_host_cmd_request_header *)rx->buf;

        /* Only support version 3 */
        if (rx_header->prtcl_ver != 3) {
                return EC_HOST_CMD_INVALID_HEADER;
        }

        const uint16_t rx_valid_data_size = rx_header->data_len + RX_HEADER_SIZE;
        /*
         * Ensure we received at least as much data as is expected.
         * It is okay to receive more since some hardware interfaces
         * add on extra padding bytes at the end.
         */
        if (rx->len < rx_valid_data_size) {
                return EC_HOST_CMD_REQUEST_TRUNCATED;
        }

        /* Validate checksum */
        if (cal_checksum((uint8_t *)rx_header, rx_valid_data_size) != 0) {
                return EC_HOST_CMD_INVALID_CHECKSUM;
        }

        return EC_HOST_CMD_SUCCESS;
}
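
/*
 * Wire layout assumed by verify_rx() and prepare_response() (protocol
 * version 3): a fixed-size header followed by data_len payload bytes; any
 * trailing bytes are treated as bus padding and are not covered by the
 * checksum.
 *
 *        | header (RX_HEADER_SIZE) | data_len payload bytes | optional padding |
 *        |<-------- covered by the checksum -------------->|
 *
 * The response mirrors this with a TX_HEADER_SIZE header. The exact header
 * fields are defined by struct ec_host_cmd_request_header and
 * struct ec_host_cmd_response_header in ec_host_cmd.h.
 */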

static enum ec_host_cmd_status validate_handler(const struct ec_host_cmd_handler *handler,
                                                const struct ec_host_cmd_handler_args *args)
{
        if (handler->min_rqt_size > args->input_buf_size) {
                return EC_HOST_CMD_REQUEST_TRUNCATED;
        }

        if (handler->min_rsp_size > args->output_buf_max) {
                return EC_HOST_CMD_INVALID_RESPONSE;
        }

        if (args->version >= NUM_BITS(handler->version_mask) ||
            !(handler->version_mask & BIT(args->version))) {
                return EC_HOST_CMD_INVALID_VERSION;
        }

        return EC_HOST_CMD_SUCCESS;
}

static enum ec_host_cmd_status prepare_response(struct ec_host_cmd_tx_buf *tx, uint16_t len)
{
        struct ec_host_cmd_response_header *const tx_header = (void *)tx->buf;

        tx_header->prtcl_ver = 3;
        tx_header->result = EC_HOST_CMD_SUCCESS;
        tx_header->data_len = len;
        tx_header->reserved = 0;

        const uint16_t tx_valid_data_size = tx_header->data_len + TX_HEADER_SIZE;

        if (tx_valid_data_size > tx->len_max) {
                return EC_HOST_CMD_INVALID_RESPONSE;
        }

        /* Calculate checksum */
        tx_header->checksum = 0;
        tx_header->checksum = cal_checksum(tx->buf, tx_valid_data_size);

        tx->len = tx_valid_data_size;

        return EC_HOST_CMD_SUCCESS;
}

void ec_host_cmd_set_user_cb(ec_host_cmd_user_cb_t cb, void *user_data)
{
        struct ec_host_cmd *hc = &ec_host_cmd;

        hc->user_cb = cb;
        hc->user_data = user_data;
}
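
/*
 * Usage sketch: the user callback runs from ec_host_cmd_rx_notify() for every
 * request that passes verify_rx(), before the command is dispatched. The
 * callback name below is an example; see the ec_host_cmd_user_cb_t typedef in
 * ec_host_cmd.h for the exact signature.
 *
 *        static void on_host_request(const struct ec_host_cmd_rx_ctx *rx_ctx, void *user_data)
 *        {
 *                // e.g. note host activity or wake the application
 *        }
 *
 *        ec_host_cmd_set_user_cb(on_host_request, NULL);
 */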

int ec_host_cmd_send_response(enum ec_host_cmd_status status,
                              const struct ec_host_cmd_handler_args *args)
{
        struct ec_host_cmd *hc = &ec_host_cmd;
        struct ec_host_cmd_tx_buf *tx = &hc->tx;

        if (hc->state != EC_HOST_CMD_STATE_PROCESSING) {
                LOG_ERR("Unexpected state while sending");
                return -ENOTSUP;
        }
        hc->state = EC_HOST_CMD_STATE_SENDING;

        if (status != EC_HOST_CMD_SUCCESS) {
                const struct ec_host_cmd_request_header *const rx_header =
                        (const struct ec_host_cmd_request_header *const)hc->rx_ctx.buf;

                LOG_INF("HC 0x%04x err %d", rx_header->cmd_id, status);
                send_status_response(hc->backend, tx, status);
                return status;
        }

#ifdef CONFIG_EC_HOST_CMD_LOG_DBG_BUFFERS
        if (args->output_buf_size) {
                LOG_HEXDUMP_DBG(args->output_buf, args->output_buf_size, "HC resp:");
        }
#endif

        status = prepare_response(tx, args->output_buf_size);
        if (status != EC_HOST_CMD_SUCCESS) {
                send_status_response(hc->backend, tx, status);
                return status;
        }

        return hc->backend->api->send(hc->backend);
}

void ec_host_cmd_rx_notify(void)
{
        struct ec_host_cmd *hc = &ec_host_cmd;
        struct ec_host_cmd_rx_ctx *rx = &hc->rx_ctx;

        hc->rx_status = verify_rx(rx);

        if (!hc->rx_status && hc->user_cb) {
                hc->user_cb(rx, hc->user_data);
        }

        k_sem_give(&hc->rx_ready);
}

static void ec_host_cmd_log_request(const uint8_t *rx_buf)
{
        static uint16_t prev_cmd;
        const struct ec_host_cmd_request_header *const rx_header =
                (const struct ec_host_cmd_request_header *const)rx_buf;

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
        if (ec_host_cmd_is_suppressed(rx_header->cmd_id)) {
                ec_host_cmd_check_suppressed();

                return;
        }
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

        if (IS_ENABLED(CONFIG_EC_HOST_CMD_LOG_DBG_BUFFERS)) {
                if (rx_header->data_len) {
                        const uint8_t *rx_data = rx_buf + RX_HEADER_SIZE;
                        static const char dbg_fmt[] = "HC 0x%04x.%d:";
                        /* Use sizeof(dbg_fmt) for the buffer: the 4 characters of "%04x"
                         * hold the 4 hex digits of the command id and the 2 characters of
                         * "%d" hold the version, so no additional buffer space is required.
                         */
                        char dbg_raw[sizeof(dbg_fmt)];

                        snprintf(dbg_raw, sizeof(dbg_raw), dbg_fmt, rx_header->cmd_id,
                                 rx_header->cmd_ver);
                        LOG_HEXDUMP_DBG(rx_data, rx_header->data_len, dbg_raw);

                        return;
                }
        }

        /* In normal output mode, skip printing repeats of the same command
         * that occur in rapid succession - such as flash commands during
         * software sync.
         */
        if (rx_header->cmd_id != prev_cmd) {
                prev_cmd = rx_header->cmd_id;
                LOG_INF("HC 0x%04x", rx_header->cmd_id);
        } else {
                LOG_DBG("HC 0x%04x", rx_header->cmd_id);
        }
}

FUNC_NORETURN static void ec_host_cmd_thread(void *hc_handle, void *arg2, void *arg3)
{
        ARG_UNUSED(arg2);
        ARG_UNUSED(arg3);
        enum ec_host_cmd_status status;
        struct ec_host_cmd *hc = (struct ec_host_cmd *)hc_handle;
        struct ec_host_cmd_rx_ctx *rx = &hc->rx_ctx;
        struct ec_host_cmd_tx_buf *tx = &hc->tx;
        const struct ec_host_cmd_handler *found_handler;
        const struct ec_host_cmd_request_header *const rx_header = (void *)rx->buf;
        /* The pointer to the rx buffer is constant during communication */
        struct ec_host_cmd_handler_args args = {
                .output_buf = (uint8_t *)tx->buf + TX_HEADER_SIZE,
                .input_buf = rx->buf + RX_HEADER_SIZE,
                .reserved = NULL,
        };

        __ASSERT(hc->state != EC_HOST_CMD_STATE_DISABLED, "HC backend not initialized");

        while (1) {
                hc->state = EC_HOST_CMD_STATE_RECEIVING;
                /* Wait until an RX message is received on the host interface */
                k_sem_take(&hc->rx_ready, K_FOREVER);
                hc->state = EC_HOST_CMD_STATE_PROCESSING;

                ec_host_cmd_log_request(rx->buf);

                /* Check the status of the rx data, which was verified in
                 * ec_host_cmd_rx_notify().
                 */
                if (hc->rx_status != EC_HOST_CMD_SUCCESS) {
                        ec_host_cmd_send_response(hc->rx_status, &args);
                        continue;
                }

                found_handler = NULL;
                STRUCT_SECTION_FOREACH(ec_host_cmd_handler, handler) {
                        if (handler->id == rx_header->cmd_id) {
                                found_handler = handler;
                                break;
                        }
                }

                /* No handler in this image for requested command */
                if (found_handler == NULL) {
                        ec_host_cmd_send_response(EC_HOST_CMD_INVALID_COMMAND, &args);
                        continue;
                }

                args.command = rx_header->cmd_id;
                args.version = rx_header->cmd_ver;
                args.input_buf_size = rx_header->data_len;
                args.output_buf_max = tx->len_max - TX_HEADER_SIZE;
                args.output_buf_size = 0;

                status = validate_handler(found_handler, &args);
                if (status != EC_HOST_CMD_SUCCESS) {
                        ec_host_cmd_send_response(status, &args);
                        continue;
                }

                /*
                 * Pre-emptively clear the entire response buffer so we do not
                 * have any left over contents from previous host commands.
                 */
                memset(args.output_buf, 0, args.output_buf_max);

                status = found_handler->handler(&args);

                ec_host_cmd_send_response(status, &args);
        }
}
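
/*
 * Handler registration sketch: the dispatch loop above walks the
 * ec_host_cmd_handler iterable section, so a command becomes reachable by
 * placing an entry in that section. Application code normally uses the
 * EC_HOST_CMD_HANDLER()/EC_HOST_CMD_HANDLER_UNBOUND() helpers from
 * ec_host_cmd.h; the expanded form below uses only fields referenced in this
 * file, and the command id, names and response struct are hypothetical.
 *
 *        struct hello_response {
 *                uint32_t value;
 *        };
 *
 *        static enum ec_host_cmd_status hello_handler(struct ec_host_cmd_handler_args *args)
 *        {
 *                struct hello_response *rsp = args->output_buf;
 *
 *                rsp->value = 0xa5a5a5a5;
 *                args->output_buf_size = sizeof(*rsp);
 *
 *                return EC_HOST_CMD_SUCCESS;
 *        }
 *
 *        STRUCT_SECTION_ITERABLE(ec_host_cmd_handler, hello_cmd) = {
 *                .handler = hello_handler,
 *                .id = 0x0123,
 *                .version_mask = BIT(0),
 *                .min_rqt_size = 0,
 *                .min_rsp_size = sizeof(struct hello_response),
 *        };
 */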

#ifndef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
FUNC_NORETURN void ec_host_cmd_task(void)
{
        ec_host_cmd_thread(&ec_host_cmd, NULL, NULL);
}
#endif

int ec_host_cmd_init(struct ec_host_cmd_backend *backend)
{
        struct ec_host_cmd *hc = &ec_host_cmd;
        int ret;
        uint8_t *handler_tx_buf, *handler_rx_buf;
        uint8_t *handler_tx_buf_end, *handler_rx_buf_end;
        uint8_t *backend_tx_buf, *backend_rx_buf;

        hc->backend = backend;

        /* Start with rx_ready empty: the handler thread blocks until the backend
         * signals a received request, so the backend may write to the rx buffer at startup.
         */
        k_sem_init(&hc->rx_ready, 0, 1);

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
        k_work_init(&work_in_progress, handler_in_progress);
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

        handler_tx_buf = hc->tx.buf;
        handler_rx_buf = hc->rx_ctx.buf;
        handler_tx_buf_end = handler_tx_buf + CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE;
        handler_rx_buf_end = handler_rx_buf + CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE;

        ret = backend->api->init(backend, &hc->rx_ctx, &hc->tx);

        backend_tx_buf = hc->tx.buf;
        backend_rx_buf = hc->rx_ctx.buf;

        if (ret != 0) {
                return ret;
        }

        if (!backend_tx_buf || !backend_rx_buf) {
                LOG_ERR("No buffer for Host Command communication");
                return -EIO;
        }

        hc->state = EC_HOST_CMD_STATE_RECEIVING;

        /* Check that the backend uses the provided buffers. The backend may shift the
         * pointers within the buffers to make room for a preamble, so only make sure
         * the rx/tx pointers stay within the provided buffer ranges.
         */
        if ((handler_tx_buf &&
             !((handler_tx_buf <= backend_tx_buf) && (handler_tx_buf_end > backend_tx_buf))) ||
            (handler_rx_buf &&
             !((handler_rx_buf <= backend_rx_buf) && (handler_rx_buf_end > backend_rx_buf)))) {
                LOG_WRN("Host Command handler provided unused buffer");
        }

#ifdef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
        k_thread_create(&hc->thread, hc_stack, CONFIG_EC_HOST_CMD_HANDLER_STACK_SIZE,
                        ec_host_cmd_thread, (void *)hc, NULL, NULL, CONFIG_EC_HOST_CMD_HANDLER_PRIO,
                        0, K_NO_WAIT);
        k_thread_name_set(&hc->thread, "ec_host_cmd");
#endif /* CONFIG_EC_HOST_CMD_DEDICATED_THREAD */

        return 0;
}
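
/*
 * Initialization sketch: with CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT the
 * backend selected by the devicetree chosen node (checked at the top of this
 * file) is expected to be brought up automatically. Otherwise the application
 * calls ec_host_cmd_init() itself with a backend obtained from backend.h; the
 * eSPI getter and node label below are illustrative assumptions.
 *
 *        const struct device *espi_dev = DEVICE_DT_GET(DT_NODELABEL(espi0));
 *
 *        ec_host_cmd_init(ec_host_cmd_backend_get_espi(espi_dev));
 */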

const struct ec_host_cmd *ec_host_cmd_get_hc(void)
{
        return &ec_host_cmd;
}