/*
 * Copyright (c) 2020 Google LLC.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/devicetree.h>
#include <zephyr/kernel.h>
#include <zephyr/linker/section_tags.h>
#include <zephyr/logging/log.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/sys/iterable_sections.h>
#include <stdio.h>
#include <string.h>

LOG_MODULE_REGISTER(host_cmd_handler, CONFIG_EC_HC_LOG_LEVEL);

#ifdef CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT
#define EC_HOST_CMD_CHOSEN_BACKEND_LIST \
	zephyr_host_cmd_espi_backend, zephyr_host_cmd_shi_backend, zephyr_host_cmd_uart_backend, \
	zephyr_host_cmd_spi_backend

#define EC_HOST_CMD_ADD_CHOSEN(chosen) COND_CODE_1(DT_NODE_EXISTS(DT_CHOSEN(chosen)), (1), (0))

#define NUMBER_OF_CHOSEN_BACKENDS \
	FOR_EACH(EC_HOST_CMD_ADD_CHOSEN, (+), EC_HOST_CMD_CHOSEN_BACKEND_LIST) \
	+0

BUILD_ASSERT(NUMBER_OF_CHOSEN_BACKENDS < 2, "Number of chosen backends > 1");
#endif /* CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT */
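
/*
 * Note: at most one host-command backend may be selected as a devicetree "chosen" node.
 * As an illustrative (non-normative) example, an application overlay could select the
 * eSPI backend with something like:
 *
 *	/ {
 *		chosen {
 *			zephyr,host-cmd-espi-backend = &espi0;
 *		};
 *	};
 *
 * The exact chosen property names and node labels depend on the board; see the
 * ec_host_cmd backend documentation for the authoritative list.
 */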
32
33 #define RX_HEADER_SIZE (sizeof(struct ec_host_cmd_request_header))
34 #define TX_HEADER_SIZE (sizeof(struct ec_host_cmd_response_header))
35
36 #ifdef CONFIG_EC_HOST_CMD_NOCACHE_BUFFERS
37 #define BUFFERS_CACHE_ATTR __nocache
38 #else
39 #define BUFFERS_CACHE_ATTR
40 #endif
41
42 COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF,
43 (static uint8_t hc_rx_buffer[CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE] __aligned(4)
44 BUFFERS_CACHE_ATTR;), ())
45 COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF,
46 (static uint8_t hc_tx_buffer[CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE] __aligned(4)
47 BUFFERS_CACHE_ATTR;), ())

#ifdef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
static K_KERNEL_STACK_DEFINE(hc_stack, CONFIG_EC_HOST_CMD_HANDLER_STACK_SIZE);
#endif /* CONFIG_EC_HOST_CMD_DEDICATED_THREAD */

static struct ec_host_cmd ec_host_cmd = {
	.rx_ctx = {
		.buf = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF, (hc_rx_buffer),
				   (NULL)),
		.len_max = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_DEF,
				       (CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE), (0)),
	},
	.tx = {
		.buf = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF, (hc_tx_buffer),
				   (NULL)),
		.len_max = COND_CODE_1(CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_DEF,
				       (CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE), (0)),
	},
};

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
/* Indicates that a command has sent EC_HOST_CMD_IN_PROGRESS but hasn't sent a final status */
static bool cmd_in_progress;

/* The final result of the last command that has sent EC_HOST_CMD_IN_PROGRESS */
static enum ec_host_cmd_status saved_status = EC_HOST_CMD_UNAVAILABLE;
static struct k_work work_in_progress;
ec_host_cmd_in_progress_cb_t cb_in_progress;
static void *user_data_in_progress;
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
static uint16_t suppressed_cmds[CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER];
static uint16_t suppressed_cmds_count[CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER];
static int64_t suppressed_cmds_deadline = CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_INTERVAL_SECS * 1000U;
static size_t suppressed_cmds_number;
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

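/*
 * Compute the 8-bit checksum used by host command protocol v3: the returned value is
 * chosen so that the byte-wise sum of the whole buffer, including the checksum field,
 * is zero modulo 256.
 */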
static uint8_t cal_checksum(const uint8_t *const buffer, const uint16_t size)
{
	uint8_t checksum = 0;

	for (size_t i = 0; i < size; ++i) {
		checksum += buffer[i];
	}
	return (uint8_t)(-checksum);
}

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
bool ec_host_cmd_send_in_progress_ended(void)
{
	return !cmd_in_progress;
}

enum ec_host_cmd_status ec_host_cmd_send_in_progress_status(void)
{
	enum ec_host_cmd_status ret = saved_status;

	saved_status = EC_HOST_CMD_UNAVAILABLE;

	return ret;
}

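/*
 * Defer the remaining work of the current command to the system work queue. A minimal,
 * illustrative handler sketch (names are hypothetical, not part of this module):
 *
 *	static enum ec_host_cmd_status slow_work(void *user_data)
 *	{
 *		// ...long-running operation...
 *		return EC_HOST_CMD_SUCCESS;
 *	}
 *
 *	// Inside a command handler: reply EC_HOST_CMD_IN_PROGRESS now, finish later.
 *	ec_host_cmd_send_in_progress_continue(slow_work, NULL);
 *	return EC_HOST_CMD_IN_PROGRESS;
 *
 * The final status can later be retrieved with ec_host_cmd_send_in_progress_status().
 */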
enum ec_host_cmd_status ec_host_cmd_send_in_progress_continue(ec_host_cmd_in_progress_cb_t cb,
							       void *user_data)
{
	if (cmd_in_progress) {
		return EC_HOST_CMD_BUSY;
	}

	cmd_in_progress = true;
	cb_in_progress = cb;
	user_data_in_progress = user_data;
	saved_status = EC_HOST_CMD_UNAVAILABLE;
	LOG_INF("HC pending");
	k_work_submit(&work_in_progress);

	return EC_HOST_CMD_SUCCESS;
}

static void handler_in_progress(struct k_work *work)
{
	if (cb_in_progress != NULL) {
		saved_status = cb_in_progress(user_data_in_progress);
		LOG_INF("HC pending done, result=%d", saved_status);
	} else {
		saved_status = EC_HOST_CMD_UNAVAILABLE;
		LOG_ERR("HC incorrect IN_PROGRESS callback");
	}
	cb_in_progress = NULL;
	cmd_in_progress = false;
}
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
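/*
 * Add a command id to the list of commands whose per-request logging is suppressed.
 * Suppressed commands are counted and dumped periodically by ec_host_cmd_dump_suppressed()
 * instead of being logged on every request. Illustrative call from application init code
 * (the command id below is just an example):
 *
 *	ec_host_cmd_add_suppressed(0x0097);
 */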
int ec_host_cmd_add_suppressed(uint16_t cmd_id)
{
	if (suppressed_cmds_number >= CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_NUMBER) {
		return -EIO;
	}

	suppressed_cmds[suppressed_cmds_number] = cmd_id;
	++suppressed_cmds_number;

	return 0;
}

static bool ec_host_cmd_is_suppressed(uint16_t cmd_id)
{
	int i;

	for (i = 0; i < suppressed_cmds_number; i++) {
		if (suppressed_cmds[i] == cmd_id) {
			suppressed_cmds_count[i]++;

			return true;
		}
	}

	return false;
}

void ec_host_cmd_dump_suppressed(void)
{
	int i;
	int64_t uptime = k_uptime_get();

	LOG_PRINTK("[%llds HC Suppressed:", uptime / 1000U);
	for (i = 0; i < suppressed_cmds_number; i++) {
		LOG_PRINTK(" 0x%x=%d", suppressed_cmds[i], suppressed_cmds_count[i]);
		suppressed_cmds_count[i] = 0;
	}
	LOG_PRINTK("]\n");

	/* Reset the timer */
	suppressed_cmds_deadline = uptime + CONFIG_EC_HOST_CMD_LOG_SUPPRESSED_INTERVAL_SECS * 1000U;
}

static void ec_host_cmd_check_suppressed(void)
{
	if (k_uptime_get() >= suppressed_cmds_deadline) {
		ec_host_cmd_dump_suppressed();
	}
}
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

static void send_status_response(const struct ec_host_cmd_backend *backend,
				 struct ec_host_cmd_tx_buf *tx,
				 const enum ec_host_cmd_status status)
{
	struct ec_host_cmd_response_header *const tx_header = (void *)tx->buf;

	tx_header->prtcl_ver = 3;
	tx_header->result = status;
	tx_header->data_len = 0;
	tx_header->reserved = 0;
	tx_header->checksum = 0;
	tx_header->checksum = cal_checksum((uint8_t *)tx_header, TX_HEADER_SIZE);

	tx->len = TX_HEADER_SIZE;

	backend->api->send(backend);
}

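/*
 * Validate a received protocol v3 request: the frame is a
 * struct ec_host_cmd_request_header followed by data_len bytes of payload, and the
 * byte-wise sum of header plus payload (including the checksum field) must be zero.
 */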
static enum ec_host_cmd_status verify_rx(struct ec_host_cmd_rx_ctx *rx)
{
	/* rx buf and len now have valid incoming data */
	if (rx->len < RX_HEADER_SIZE) {
		return EC_HOST_CMD_REQUEST_TRUNCATED;
	}

	const struct ec_host_cmd_request_header *rx_header =
		(struct ec_host_cmd_request_header *)rx->buf;

	/* Only support version 3 */
	if (rx_header->prtcl_ver != 3) {
		return EC_HOST_CMD_INVALID_HEADER;
	}

	const uint16_t rx_valid_data_size = rx_header->data_len + RX_HEADER_SIZE;
	/*
	 * Ensure we received at least as much data as is expected.
	 * It is okay to receive more since some hardware interfaces
	 * add on extra padding bytes at the end.
	 */
	if (rx->len < rx_valid_data_size) {
		return EC_HOST_CMD_REQUEST_TRUNCATED;
	}

	/* Validate checksum */
	if (cal_checksum((uint8_t *)rx_header, rx_valid_data_size) != 0) {
		return EC_HOST_CMD_INVALID_CHECKSUM;
	}

	return EC_HOST_CMD_SUCCESS;
}

static enum ec_host_cmd_status validate_handler(const struct ec_host_cmd_handler *handler,
						const struct ec_host_cmd_handler_args *args)
{
	if (handler->min_rqt_size > args->input_buf_size) {
		return EC_HOST_CMD_REQUEST_TRUNCATED;
	}

	if (handler->min_rsp_size > args->output_buf_max) {
		return EC_HOST_CMD_INVALID_RESPONSE;
	}

	if (args->version >= NUM_BITS(handler->version_mask) ||
	    !(handler->version_mask & BIT(args->version))) {
		return EC_HOST_CMD_INVALID_VERSION;
	}

	return EC_HOST_CMD_SUCCESS;
}

static enum ec_host_cmd_status prepare_response(struct ec_host_cmd_tx_buf *tx, uint16_t len)
{
	struct ec_host_cmd_response_header *const tx_header = (void *)tx->buf;

	tx_header->prtcl_ver = 3;
	tx_header->result = EC_HOST_CMD_SUCCESS;
	tx_header->data_len = len;
	tx_header->reserved = 0;

	const uint16_t tx_valid_data_size = tx_header->data_len + TX_HEADER_SIZE;

	if (tx_valid_data_size > tx->len_max) {
		return EC_HOST_CMD_INVALID_RESPONSE;
	}

	/* Calculate checksum */
	tx_header->checksum = 0;
	tx_header->checksum = cal_checksum(tx->buf, tx_valid_data_size);

	tx->len = tx_valid_data_size;

	return EC_HOST_CMD_SUCCESS;
}

void ec_host_cmd_set_user_cb(ec_host_cmd_user_cb_t cb, void *user_data)
{
	struct ec_host_cmd *hc = &ec_host_cmd;

	hc->user_cb = cb;
	hc->user_data = user_data;
}

int ec_host_cmd_send_response(enum ec_host_cmd_status status,
			      const struct ec_host_cmd_handler_args *args)
{
	struct ec_host_cmd *hc = &ec_host_cmd;
	struct ec_host_cmd_tx_buf *tx = &hc->tx;

	if (hc->state != EC_HOST_CMD_STATE_PROCESSING) {
		LOG_ERR("Unexpected state while sending");
		return -ENOTSUP;
	}
	hc->state = EC_HOST_CMD_STATE_SENDING;

	if (status != EC_HOST_CMD_SUCCESS) {
		const struct ec_host_cmd_request_header *const rx_header =
			(const struct ec_host_cmd_request_header *const)hc->rx_ctx.buf;

		LOG_INF("HC 0x%04x err %d", rx_header->cmd_id, status);
		send_status_response(hc->backend, tx, status);
		return status;
	}

#ifdef CONFIG_EC_HOST_CMD_LOG_DBG_BUFFERS
	if (args->output_buf_size) {
		LOG_HEXDUMP_DBG(args->output_buf, args->output_buf_size, "HC resp:");
	}
#endif

	status = prepare_response(tx, args->output_buf_size);
	if (status != EC_HOST_CMD_SUCCESS) {
		send_status_response(hc->backend, tx, status);
		return status;
	}

	return hc->backend->api->send(hc->backend);
}

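/*
 * Called by a backend once a complete request has been placed in the RX buffer. The
 * request is verified here, in the backend's calling context, and the processing thread
 * is then woken up via the rx_ready semaphore.
 */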
void ec_host_cmd_rx_notify(void)
{
	struct ec_host_cmd *hc = &ec_host_cmd;
	struct ec_host_cmd_rx_ctx *rx = &hc->rx_ctx;

	hc->rx_status = verify_rx(rx);

	if (!hc->rx_status && hc->user_cb) {
		hc->user_cb(rx, hc->user_data);
	}

	k_sem_give(&hc->rx_ready);
}

static void ec_host_cmd_log_request(const uint8_t *rx_buf)
{
	static uint16_t prev_cmd;
	const struct ec_host_cmd_request_header *const rx_header =
		(const struct ec_host_cmd_request_header *const)rx_buf;

#ifdef CONFIG_EC_HOST_CMD_LOG_SUPPRESSED
	if (ec_host_cmd_is_suppressed(rx_header->cmd_id)) {
		ec_host_cmd_check_suppressed();

		return;
	}
#endif /* CONFIG_EC_HOST_CMD_LOG_SUPPRESSED */

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_LOG_DBG_BUFFERS)) {
		if (rx_header->data_len) {
			const uint8_t *rx_data = rx_buf + RX_HEADER_SIZE;
			static const char dbg_fmt[] = "HC 0x%04x.%d:";
			/* Use sizeof because "%04x" needs 4 bytes for command id, and
			 * %d needs 2 bytes for version, so no additional buffer is required.
			 */
			char dbg_raw[sizeof(dbg_fmt)];

			snprintf(dbg_raw, sizeof(dbg_raw), dbg_fmt, rx_header->cmd_id,
				 rx_header->cmd_ver);
			LOG_HEXDUMP_DBG(rx_data, rx_header->data_len, dbg_raw);

			return;
		}
	}

	/* In normal output mode, skip printing repeats of the same command
	 * that occur in rapid succession - such as flash commands during
	 * software sync.
	 */
	if (rx_header->cmd_id != prev_cmd) {
		prev_cmd = rx_header->cmd_id;
		LOG_INF("HC 0x%04x", rx_header->cmd_id);
	} else {
		LOG_DBG("HC 0x%04x", rx_header->cmd_id);
	}
}

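/*
 * Main host-command processing loop: wait for a verified request, look up its handler in
 * the ec_host_cmd_handler iterable section (handlers are registered at build time, e.g.
 * with the EC_HOST_CMD_HANDLER macros from <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>; see
 * that header for the exact registration signature), validate the request against the
 * handler's size and version constraints, invoke it, and send the response.
 */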
FUNC_NORETURN static void ec_host_cmd_thread(void *hc_handle, void *arg2, void *arg3)
{
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);
	enum ec_host_cmd_status status;
	struct ec_host_cmd *hc = (struct ec_host_cmd *)hc_handle;
	struct ec_host_cmd_rx_ctx *rx = &hc->rx_ctx;
	struct ec_host_cmd_tx_buf *tx = &hc->tx;
	const struct ec_host_cmd_handler *found_handler;
	const struct ec_host_cmd_request_header *const rx_header = (void *)rx->buf;
	/* The pointer to rx buffer is constant during communication */
	struct ec_host_cmd_handler_args args = {
		.output_buf = (uint8_t *)tx->buf + TX_HEADER_SIZE,
		.input_buf = rx->buf + RX_HEADER_SIZE,
		.reserved = NULL,
	};

	__ASSERT(hc->state != EC_HOST_CMD_STATE_DISABLED, "HC backend not initialized");

	while (1) {
		hc->state = EC_HOST_CMD_STATE_RECEIVING;
		/* Wait until an RX message is received on the host interface */
		k_sem_take(&hc->rx_ready, K_FOREVER);
		hc->state = EC_HOST_CMD_STATE_PROCESSING;

		ec_host_cmd_log_request(rx->buf);

		/* Check the status of the rx data, which has already been verified in
		 * ec_host_cmd_rx_notify().
		 */
		if (hc->rx_status != EC_HOST_CMD_SUCCESS) {
			ec_host_cmd_send_response(hc->rx_status, &args);
			continue;
		}

		found_handler = NULL;
		STRUCT_SECTION_FOREACH(ec_host_cmd_handler, handler) {
			if (handler->id == rx_header->cmd_id) {
				found_handler = handler;
				break;
			}
		}

		/* No handler in this image for requested command */
		if (found_handler == NULL) {
			ec_host_cmd_send_response(EC_HOST_CMD_INVALID_COMMAND, &args);
			continue;
		}

		args.command = rx_header->cmd_id;
		args.version = rx_header->cmd_ver;
		args.input_buf_size = rx_header->data_len;
		args.output_buf_max = tx->len_max - TX_HEADER_SIZE;
		args.output_buf_size = 0;

		status = validate_handler(found_handler, &args);
		if (status != EC_HOST_CMD_SUCCESS) {
			ec_host_cmd_send_response(status, &args);
			continue;
		}

		/*
		 * Pre-emptively clear the entire response buffer so we do not
		 * have any left over contents from previous host commands.
		 */
		memset(args.output_buf, 0, args.output_buf_max);

		status = found_handler->handler(&args);

		ec_host_cmd_send_response(status, &args);
	}
}

#ifndef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
FUNC_NORETURN void ec_host_cmd_task(void)
{
	ec_host_cmd_thread(&ec_host_cmd, NULL, NULL);
}
#endif

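/*
 * Initialize the host command subsystem with the given backend: the backend may keep the
 * handler-provided RX/TX buffers or substitute its own, and with
 * CONFIG_EC_HOST_CMD_DEDICATED_THREAD the processing thread is started here. A minimal,
 * illustrative call from application code (the backend getter depends on the selected
 * transport, e.g. eSPI/SHI/UART/SPI; check the backend.h API for the exact name and the
 * device argument it expects):
 *
 *	ec_host_cmd_init(ec_host_cmd_backend_get_espi(espi_dev));
 */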
int ec_host_cmd_init(struct ec_host_cmd_backend *backend)
{
	struct ec_host_cmd *hc = &ec_host_cmd;
	int ret;
	uint8_t *handler_tx_buf, *handler_rx_buf;
	uint8_t *handler_tx_buf_end, *handler_rx_buf_end;
	uint8_t *backend_tx_buf, *backend_rx_buf;

	hc->backend = backend;

	/* Allow writing to rx buff at startup */
	k_sem_init(&hc->rx_ready, 0, 1);

#ifdef CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS
	k_work_init(&work_in_progress, handler_in_progress);
#endif /* CONFIG_EC_HOST_CMD_IN_PROGRESS_STATUS */

	handler_tx_buf = hc->tx.buf;
	handler_rx_buf = hc->rx_ctx.buf;
	handler_tx_buf_end = handler_tx_buf + CONFIG_EC_HOST_CMD_HANDLER_TX_BUFFER_SIZE;
	handler_rx_buf_end = handler_rx_buf + CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE;

	ret = backend->api->init(backend, &hc->rx_ctx, &hc->tx);

	backend_tx_buf = hc->tx.buf;
	backend_rx_buf = hc->rx_ctx.buf;

	if (ret != 0) {
		return ret;
	}

	if (!backend_tx_buf || !backend_rx_buf) {
		LOG_ERR("No buffer for Host Command communication");
		return -EIO;
	}

	hc->state = EC_HOST_CMD_STATE_RECEIVING;

	/* Check if a backend uses provided buffers. The buffer pointers can be shifted within the
	 * buffer to make space for preamble. Make sure the rx/tx pointers are within the provided
	 * buffers ranges.
	 */
	if ((handler_tx_buf &&
	     !((handler_tx_buf <= backend_tx_buf) && (handler_tx_buf_end > backend_tx_buf))) ||
	    (handler_rx_buf &&
	     !((handler_rx_buf <= backend_rx_buf) && (handler_rx_buf_end > backend_rx_buf)))) {
		LOG_WRN("Host Command handler provided unused buffer");
	}

#ifdef CONFIG_EC_HOST_CMD_DEDICATED_THREAD
	k_thread_create(&hc->thread, hc_stack, CONFIG_EC_HOST_CMD_HANDLER_STACK_SIZE,
			ec_host_cmd_thread, (void *)hc, NULL, NULL, CONFIG_EC_HOST_CMD_HANDLER_PRIO,
			0, K_NO_WAIT);
	k_thread_name_set(&hc->thread, "ec_host_cmd");
#endif /* CONFIG_EC_HOST_CMD_DEDICATED_THREAD */

	return 0;
}

const struct ec_host_cmd *ec_host_cmd_get_hc(void)
{
	return &ec_host_cmd;
}