/*
 * Copyright (c) 2022 Google LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>

#include <zephyr/device.h>
#include <zephyr/drivers/espi.h>
#include <zephyr/logging/log.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>

LOG_MODULE_REGISTER(host_cmd_espi, CONFIG_EC_HC_LOG_LEVEL);

#define RX_HEADER_SIZE (sizeof(struct ec_host_cmd_request_header))

/* eSPI Host Command state */
enum ec_host_cmd_espi_state {
        /* Interface is disabled */
        ESPI_STATE_DISABLED,
        /* Ready to receive next request */
        ESPI_STATE_READY_TO_RECV,
        /* Processing request */
        ESPI_STATE_PROCESSING,
        /* Sending response */
        ESPI_STATE_SENDING,
        ESPI_STATE_COUNT,
};

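/* Context of the eSPI Host Command backend */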
struct ec_host_cmd_espi_ctx {
        /* eSPI device instance */
        const struct device *espi_dev;
        /* Context for read operation */
        struct ec_host_cmd_rx_ctx *rx_ctx;
        /* Transmit buffer */
        struct ec_host_cmd_tx_buf *tx;
        /* eSPI callback */
        struct espi_callback espi_cb;
        /* eSPI Host Command state */
        enum ec_host_cmd_espi_state state;
};

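/* Define a backend instance (struct ec_host_cmd_backend) together with its eSPI context */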
#define EC_HOST_CMD_ESPI_DEFINE(_name)                                   \
        static struct ec_host_cmd_espi_ctx _name##_hc_espi;              \
        struct ec_host_cmd_backend _name = {                             \
                .api = &ec_host_cmd_api,                                 \
                .ctx = (struct ec_host_cmd_espi_ctx *)&_name##_hc_espi,  \
        }

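/*
 * eSPI bus callback. Called on an EC Host Command peripheral notification:
 * copies the request from the shared memory region into the RX buffer and
 * hands it over to the Host Command handler.
 */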
static void espi_handler(const struct device *dev, struct espi_callback *cb,
                         struct espi_event espi_evt)
{
        struct ec_host_cmd_espi_ctx *hc_espi =
                CONTAINER_OF(cb, struct ec_host_cmd_espi_ctx, espi_cb);
        uint16_t event_type = (uint16_t)espi_evt.evt_details;
        /* tx stores the shared memory buf pointer and size, so use it */
        const struct ec_host_cmd_request_header *rx_header = hc_espi->tx->buf;
        const size_t shared_size = hc_espi->tx->len_max;
        const uint16_t rx_valid_data_size = rx_header->data_len + RX_HEADER_SIZE;

        if (event_type != ESPI_PERIPHERAL_EC_HOST_CMD) {
                return;
        }

        /* Make sure the Host Command was received in a valid state, so we do not overwrite
         * buffers of a Host Command that is still being processed. There is a window between
         * sending a response and setting the state to ESPI_STATE_READY_TO_RECV in which a new
         * host command can arrive, so accept the sending state as well.
         */
        if (hc_espi->state != ESPI_STATE_READY_TO_RECV && hc_espi->state != ESPI_STATE_SENDING) {
                LOG_ERR("Received HC in bad state");
                return;
        }

        /* Only support version 3 and make sure the number of bytes to copy is not
         * bigger than rx buf size or the shared memory size
         */
        if (rx_header->prtcl_ver != 3 ||
            rx_valid_data_size > CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE ||
            rx_valid_data_size > shared_size) {
                memcpy(hc_espi->rx_ctx->buf, (void *)rx_header, RX_HEADER_SIZE);
                hc_espi->rx_ctx->len = RX_HEADER_SIZE;
        } else {
                memcpy(hc_espi->rx_ctx->buf, (void *)rx_header, rx_valid_data_size);
                hc_espi->rx_ctx->len = rx_valid_data_size;
        }

        /* Even in case of errors, let the general handler send response */
        hc_espi->state = ESPI_STATE_PROCESSING;
        k_sem_give(&hc_espi->rx_ctx->handler_owns);
}

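/*
 * Backend init. Stores the RX/TX contexts, registers the eSPI peripheral
 * notification callback and points the TX buffer at the shared memory region
 * provided by the eSPI driver.
 */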
static int ec_host_cmd_espi_init(const struct ec_host_cmd_backend *backend,
                                 struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
{
        struct ec_host_cmd_espi_ctx *hc_espi = (struct ec_host_cmd_espi_ctx *)backend->ctx;

        hc_espi->state = ESPI_STATE_DISABLED;

        if (!device_is_ready(hc_espi->espi_dev)) {
                return -ENODEV;
        }

        hc_espi->rx_ctx = rx_ctx;
        hc_espi->tx = tx;

        espi_init_callback(&hc_espi->espi_cb, espi_handler, ESPI_BUS_PERIPHERAL_NOTIFICATION);
        espi_add_callback(hc_espi->espi_dev, &hc_espi->espi_cb);
        /* Use shared memory as the tx buffer */
        espi_read_lpc_request(hc_espi->espi_dev, ECUSTOM_HOST_CMD_GET_PARAM_MEMORY,
                              (uint32_t *)&tx->buf);
        espi_read_lpc_request(hc_espi->espi_dev, ECUSTOM_HOST_CMD_GET_PARAM_MEMORY_SIZE,
                              &tx->len_max);

        hc_espi->state = ESPI_STATE_READY_TO_RECV;

        return 0;
}

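/*
 * Send the response. The payload already sits in the shared memory (TX buffer),
 * so only the result code has to be written back to the Host.
 */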
static int ec_host_cmd_espi_send(const struct ec_host_cmd_backend *backend)
{
        struct ec_host_cmd_espi_ctx *hc_espi = (struct ec_host_cmd_espi_ctx *)backend->ctx;
        struct ec_host_cmd_response_header *resp_hdr = hc_espi->tx->buf;
        uint32_t result = resp_hdr->result;
        int ret;

        /* Ignore in-progress on eSPI since interface is synchronous anyway */
        if (result == EC_HOST_CMD_IN_PROGRESS) {
                return 0;
        }

        hc_espi->state = ESPI_STATE_SENDING;

        /* Data to transfer are already in the tx buffer (shared memory) */
        ret = espi_write_lpc_request(hc_espi->espi_dev, ECUSTOM_HOST_CMD_SEND_RESULT, &result);
        hc_espi->state = ESPI_STATE_READY_TO_RECV;

        return ret;
}

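/* Backend API used by the Host Command subsystem */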
static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
        .init = &ec_host_cmd_espi_init,
        .send = &ec_host_cmd_espi_send,
};

EC_HOST_CMD_ESPI_DEFINE(ec_host_cmd_espi);
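
/*
 * Return the eSPI backend instance bound to the given eSPI device.
 *
 * A minimal usage sketch, mirroring the boot-time init below and assuming the
 * chosen node zephyr,host-cmd-espi-backend points at the eSPI controller:
 *
 *   const struct device *espi_dev =
 *           DEVICE_DT_GET(DT_CHOSEN(zephyr_host_cmd_espi_backend));
 *
 *   ec_host_cmd_init(ec_host_cmd_backend_get_espi(espi_dev));
 */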
struct ec_host_cmd_backend *ec_host_cmd_backend_get_espi(const struct device *dev)
{
        ((struct ec_host_cmd_espi_ctx *)(ec_host_cmd_espi.ctx))->espi_dev = dev;
        return &ec_host_cmd_espi;
}

#if DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_espi_backend)) && \
        defined(CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT)
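/* Initialize the Host Command subsystem with the eSPI backend at boot */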
static int host_cmd_init(void)
{
        const struct device *const dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_host_cmd_espi_backend));

        ec_host_cmd_init(ec_host_cmd_backend_get_espi(dev));
        return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);
#endif