/*
 * Copyright (c) 2023 Google LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* The SPI STM32 backend implements a dedicated SPI driver for Host Commands. Unfortunately, the
 * current SPI API can't be used to handle the host command communication. The main issues are
 * the unknown size of a command sent by the host (a SPI transaction sends/receives a specific
 * number of bytes) and the need to constantly send a status byte (the SPI module is enabled and
 * disabled per transaction), see https://github.com/zephyrproject-rtos/zephyr/issues/56091.
 */

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(host_cmd_spi, CONFIG_EC_HC_LOG_LEVEL);

#include <stm32_ll_spi.h>
#include <zephyr/device.h>
#include <zephyr/drivers/clock_control/stm32_clock_control.h>
#include <zephyr/drivers/dma/dma_stm32.h>
#include <zephyr/drivers/dma.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/drivers/spi.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
#include <zephyr/pm/policy.h>
#include <zephyr/pm/device.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/time_units.h>

/* The default compatible string of a SPI devicetree node has to be replaced with the one
 * dedicated for Host Commands. This disables the standard SPI driver. For STM32 SPI,
 * "st,stm32-spi" has to be changed to "st,stm32-spi-host-cmd". The remaining "additional"
 * compatible strings should stay the same.
 */
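/* An illustrative overlay sketch (the &spi1 label and the additional compatible strings are
 * SoC/board-specific assumptions, not requirements of this backend):
 *
 *	/ {
 *		chosen {
 *			zephyr,host-cmd-spi-backend = &spi1;
 *		};
 *	};
 *
 *	&spi1 {
 *		compatible = "st,stm32h7-spi", "st,stm32-spi-fifo", "st,stm32-spi-host-cmd";
 *		status = "okay";
 *	};
 */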
#define ST_STM32_SPI_HOST_CMD_COMPAT st_stm32_spi_host_cmd
BUILD_ASSERT(DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_spi_backend)),
	     "The chosen backend node is obligatory for SPI STM32 backend.");
BUILD_ASSERT(DT_NODE_HAS_COMPAT_STATUS(DT_CHOSEN(zephyr_host_cmd_spi_backend),
					ST_STM32_SPI_HOST_CMD_COMPAT, okay),
	     "Invalid compatible of the chosen spi node.");

#define RX_HEADER_SIZE (sizeof(struct ec_host_cmd_request_header))

/* Framing byte which precedes a response packet from the EC. After sending a
 * request, the host will clock in bytes until it sees the framing byte, then
 * clock in the response packet.
 */
#define EC_SPI_FRAME_START 0xec

/* Padding bytes which are clocked out after the end of a response packet. */
#define EC_SPI_PAST_END 0xed

/* The number of the ending bytes. The number can be bigger than 1 for chip families
 * that need to bypass the DMA threshold.
 */
#define EC_SPI_PAST_END_LENGTH 1

/* EC is ready to receive. */
#define EC_SPI_RX_READY 0x78

/* EC has started receiving the request from the host, but hasn't started
 * processing it yet.
 */
#define EC_SPI_RECEIVING 0xf9

/* EC has received the entire request from the host and is processing it. */
#define EC_SPI_PROCESSING 0xfa

/* EC received bad data from the host, such as a packet header with an invalid
 * length. EC will ignore all data until chip select deasserts.
 */
#define EC_SPI_RX_BAD_DATA 0xfb

/* EC received data from the AP before it was ready. That is, the host asserted
 * chip select and started clocking data before the EC was ready to receive it.
 * EC will ignore all data until chip select deasserts.
 */
#define EC_SPI_NOT_READY 0xfc

/* Supported version of host commands protocol. */
#define EC_HOST_REQUEST_VERSION 3

/* Timeout to wait for SPI request packet
 *
 * This affects the slowest SPI clock we can support. A delay of 8192 us
 * permits a 512-byte request at 500 kHz (512 * 8 bits / 500 kHz = 8192 us),
 * assuming the master starts sending bytes as soon as it asserts chip select.
 * That's as slow as we would practically want to run the SPI interface, since
 * running it slower significantly impacts firmware update times.
 */
#define EC_SPI_CMD_RX_TIMEOUT_US 8192

#if DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32h7_spi)
#define EC_HOST_CMD_ST_STM32H7
#endif /* st_stm32h7_spi */

#if DT_NODE_HAS_COMPAT(DT_CHOSEN(zephyr_host_cmd_spi_backend), st_stm32_spi_fifo)
#define EC_HOST_CMD_ST_STM32_FIFO
#endif /* st_stm32_spi_fifo */

#define STM32_DMA_FEATURES_ID(id, dir) DT_DMAS_CELL_BY_NAME_OR(id, dir, features, 0)

#if DT_CLOCKS_HAS_IDX(DT_CHOSEN(zephyr_host_cmd_spi_backend), 1)
#define STM32_EC_HOST_CMD_SPI_DOMAIN_CLOCK_SUPPORT 1
#else
#define STM32_EC_HOST_CMD_SPI_DOMAIN_CLOCK_SUPPORT 0
#endif

/*
 * Max data size for a version 3 request/response packet. This is big enough
 * to handle a request/response header, flash write offset/size, and 512 bytes
 * of flash data.
 */
#define SPI_MAX_REQ_SIZE 0x220
#define SPI_MAX_RESP_SIZE 0x220

/* Enumeration to track the different states of an incoming request from
 * the host.
 */
enum spi_host_command_state {
	/* SPI not enabled (initial state, and when chipset is off) */
	SPI_HOST_CMD_STATE_DISABLED = 0,

	/* SPI module enabled, but not ready to receive */
	SPI_HOST_CMD_STATE_RX_NOT_READY,

	/* Ready to receive next request */
	SPI_HOST_CMD_STATE_READY_TO_RX,

	/* Receiving request */
	SPI_HOST_CMD_STATE_RECEIVING,

	/* Processing request */
	SPI_HOST_CMD_STATE_PROCESSING,

	/* Sending response */
	SPI_HOST_CMD_STATE_SENDING,

	/* Received bad data - transaction started before we were ready, or
	 * packet header from host didn't parse properly. Ignoring received
	 * data.
	 */
	SPI_HOST_CMD_STATE_RX_BAD,
};

struct dma_stream {
	const struct device *dma_dev;
	uint32_t channel;
	struct dma_config dma_cfg;
	struct dma_block_config dma_blk_cfg;
	int fifo_threshold;
};

struct ec_host_cmd_spi_cfg {
	SPI_TypeDef *spi;
	const struct pinctrl_dev_config *pcfg;
	const struct stm32_pclken *pclken;
	size_t pclk_len;
};

struct ec_host_cmd_spi_ctx {
	struct gpio_dt_spec cs;
	struct gpio_callback cs_callback;
	struct ec_host_cmd_spi_cfg *spi_config;
	struct ec_host_cmd_rx_ctx *rx_ctx;
	struct ec_host_cmd_tx_buf *tx;
	uint8_t *tx_buf;
	struct dma_stream *dma_rx;
	struct dma_stream *dma_tx;
	enum spi_host_command_state state;
	int prepare_rx_later;
#ifdef CONFIG_PM
	ATOMIC_DEFINE(pm_policy_lock_on, 1);
#endif /* CONFIG_PM */
};

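/* Preamble for a response sent back to the host. ec_host_cmd_spi_send() assembles the TX buffer
 * as: three EC_SPI_PROCESSING bytes and EC_SPI_FRAME_START (below), followed by the response
 * packet, followed by EC_SPI_PAST_END_LENGTH byte(s) of EC_SPI_PAST_END padding.
 */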
static const uint8_t out_preamble[4] = {
	EC_SPI_PROCESSING, EC_SPI_PROCESSING, EC_SPI_PROCESSING,
	EC_SPI_FRAME_START, /* This is the byte which matters */
};

static void dma_callback(const struct device *dev, void *arg, uint32_t channel, int status);
static int prepare_rx(struct ec_host_cmd_spi_ctx *hc_spi);

#define SPI_DMA_CHANNEL_INIT(id, dir, dir_cap, src_dev, dest_dev) \
	.dma_dev = DEVICE_DT_GET(DT_DMAS_CTLR_BY_NAME(id, dir)), \
	.channel = DT_DMAS_CELL_BY_NAME(id, dir, channel), \
	.dma_cfg = \
		{ \
			.dma_slot = DT_DMAS_CELL_BY_NAME(id, dir, slot), \
			.channel_direction = STM32_DMA_CONFIG_DIRECTION( \
				DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \
			.source_data_size = STM32_DMA_CONFIG_##src_dev##_DATA_SIZE( \
				DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \
			.dest_data_size = STM32_DMA_CONFIG_##dest_dev##_DATA_SIZE( \
				DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \
			.source_burst_length = 1, /* SINGLE transfer */ \
			.dest_burst_length = 1, /* SINGLE transfer */ \
			.channel_priority = STM32_DMA_CONFIG_PRIORITY( \
				DT_DMAS_CELL_BY_NAME(id, dir, channel_config)), \
			.dma_callback = dma_callback, \
			.block_count = 2, \
		}, \
	.fifo_threshold = STM32_DMA_FEATURES_FIFO_THRESHOLD(STM32_DMA_FEATURES_ID(id, dir)),

#define STM32_SPI_INIT(id) \
	PINCTRL_DT_DEFINE(id); \
	static const struct stm32_pclken pclken[] = STM32_DT_CLOCKS(id); \
	\
	static struct ec_host_cmd_spi_cfg ec_host_cmd_spi_cfg = { \
		.spi = (SPI_TypeDef *)DT_REG_ADDR(id), \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(id), \
		.pclken = pclken, \
		.pclk_len = DT_NUM_CLOCKS(id), \
	}; \
	\
	static struct dma_stream dma_rx = {SPI_DMA_CHANNEL_INIT(id, rx, RX, PERIPHERAL, MEMORY)}; \
	static struct dma_stream dma_tx = {SPI_DMA_CHANNEL_INIT(id, tx, TX, MEMORY, PERIPHERAL)}

STM32_SPI_INIT(DT_CHOSEN(zephyr_host_cmd_spi_backend));

#define EC_HOST_CMD_SPI_DEFINE(_name) \
	static struct ec_host_cmd_spi_ctx _name##_hc_spi = { \
		.dma_rx = &dma_rx, \
		.dma_tx = &dma_tx, \
		.spi_config = &ec_host_cmd_spi_cfg, \
	}; \
	struct ec_host_cmd_backend _name = { \
		.api = &ec_host_cmd_api, \
		.ctx = (struct ec_host_cmd_spi_ctx *)&_name##_hc_spi, \
	}

static inline uint32_t dma_source_addr(SPI_TypeDef *spi)
{
#ifdef EC_HOST_CMD_ST_STM32H7
	return (uint32_t)(&spi->RXDR);
#else
	return (uint32_t)LL_SPI_DMA_GetRegAddr(spi);
#endif /* EC_HOST_CMD_ST_STM32H7 */
}

static inline uint32_t dma_dest_addr(SPI_TypeDef *spi)
{
#ifdef EC_HOST_CMD_ST_STM32H7
	return (uint32_t)(&spi->TXDR);
#else
	return (uint32_t)LL_SPI_DMA_GetRegAddr(spi);
#endif /* EC_HOST_CMD_ST_STM32H7 */
}

/* Set TX register to send status, while SPI module is enabled */
static inline void tx_status(SPI_TypeDef *spi, uint8_t status)
{
	/* The number of status bytes to send can be bigger than 1 for chip
	 * families that need to bypass the DMA threshold.
	 */
	LL_SPI_TransmitData8(spi, status);
#ifdef EC_HOST_CMD_ST_STM32H7
	LL_SPI_SetUDRPattern(spi, status);
#endif /* EC_HOST_CMD_ST_STM32H7 */
}

static int expected_size(const struct ec_host_cmd_request_header *header)
{
	/* Check host request version */
	if (header->prtcl_ver != EC_HOST_REQUEST_VERSION) {
		return 0;
	}

	/* Reserved byte should be 0 */
	if (header->reserved) {
		return 0;
	}

	return sizeof(*header) + header->data_len;
}

#ifdef CONFIG_PM
static void ec_host_cmd_pm_policy_state_lock_get(struct ec_host_cmd_spi_ctx *hc_spi)
{
	if (!atomic_test_and_set_bit(hc_spi->pm_policy_lock_on, 0)) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

static void ec_host_cmd_pm_policy_state_lock_put(struct ec_host_cmd_spi_ctx *hc_spi)
{
	if (atomic_test_and_clear_bit(hc_spi->pm_policy_lock_on, 0)) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}
#else
static inline void ec_host_cmd_pm_policy_state_lock_get(struct ec_host_cmd_spi_ctx *hc_spi)
{
	ARG_UNUSED(hc_spi);
}

static void ec_host_cmd_pm_policy_state_lock_put(struct ec_host_cmd_spi_ctx *hc_spi)
{
	ARG_UNUSED(hc_spi);
}
#endif /* CONFIG_PM */

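/* DMA transfer-complete callback, registered for both the RX and TX channels. Only the end of a
 * TX (response) transfer is handled here: the next reception is prepared immediately if chip
 * select was already deasserted while the command was being processed, otherwise the status byte
 * is set to "not ready" and the reception is prepared once chip select deasserts.
 */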
static void dma_callback(const struct device *dev, void *arg, uint32_t channel, int status)
{
	struct ec_host_cmd_spi_ctx *hc_spi = arg;

	/* End of sending */
	if (channel == hc_spi->dma_tx->channel) {
		if (hc_spi->prepare_rx_later) {
			int ret;

			ret = prepare_rx(hc_spi);

			if (ret) {
				LOG_ERR("Failed to prepare RX later");
			}
		} else {
			const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
			SPI_TypeDef *spi = cfg->spi;

			/* Set the status not ready. Prepare RX after CS deassertion */
			tx_status(spi, EC_SPI_NOT_READY);
			hc_spi->state = SPI_HOST_CMD_STATE_RX_NOT_READY;
		}
	}
}

static int spi_init(const struct ec_host_cmd_spi_ctx *hc_spi)
{
	int err;

	if (!device_is_ready(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE))) {
		LOG_ERR("Clock control device not ready");
		return -ENODEV;
	}

	err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			       (clock_control_subsys_t)&hc_spi->spi_config->pclken[0]);
	if (err < 0) {
		LOG_ERR("Could not enable SPI clock");
		return err;
	}

	if (IS_ENABLED(STM32_EC_HOST_CMD_SPI_DOMAIN_CLOCK_SUPPORT) &&
	    hc_spi->spi_config->pclk_len > 1) {
		err = clock_control_configure(
			DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
			(clock_control_subsys_t)&hc_spi->spi_config->pclken[1], NULL);
		if (err < 0) {
			LOG_ERR("Could not select SPI domain clock");
			return err;
		}
	}

	/* Configure dt provided device signals when available */
	err = pinctrl_apply_state(hc_spi->spi_config->pcfg, PINCTRL_STATE_DEFAULT);
	if (err < 0) {
		LOG_ERR("SPI pinctrl setup failed (%d)", err);
		return err;
	}

	if ((hc_spi->dma_rx->dma_dev != NULL) && !device_is_ready(hc_spi->dma_rx->dma_dev)) {
		LOG_ERR("%s device not ready", hc_spi->dma_rx->dma_dev->name);
		return -ENODEV;
	}

	if ((hc_spi->dma_tx->dma_dev != NULL) && !device_is_ready(hc_spi->dma_tx->dma_dev)) {
		LOG_ERR("%s device not ready", hc_spi->dma_tx->dma_dev->name);
		return -ENODEV;
	}

	return 0;
}

static int spi_configure(const struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;

#if defined(LL_SPI_PROTOCOL_MOTOROLA) && defined(SPI_CR2_FRF)
	LL_SPI_SetStandard(spi, LL_SPI_PROTOCOL_MOTOROLA);
#endif

	/* Disable before configuration */
	LL_SPI_Disable(spi);
	/* Set clock signal configuration */
	LL_SPI_SetClockPolarity(spi, LL_SPI_POLARITY_LOW);
	LL_SPI_SetClockPhase(spi, LL_SPI_PHASE_1EDGE);
	/* Set protocol parameters */
	LL_SPI_SetTransferDirection(spi, LL_SPI_FULL_DUPLEX);
	LL_SPI_SetTransferBitOrder(spi, LL_SPI_MSB_FIRST);
	LL_SPI_DisableCRC(spi);
	LL_SPI_SetDataWidth(spi, LL_SPI_DATAWIDTH_8BIT);
	/* Set slave options */
	LL_SPI_SetNSSMode(spi, LL_SPI_NSS_HARD_INPUT);
	LL_SPI_SetMode(spi, LL_SPI_MODE_SLAVE);

#ifdef EC_HOST_CMD_ST_STM32H7
	LL_SPI_SetUDRConfiguration(spi, LL_SPI_UDR_CONFIG_REGISTER_PATTERN);
	LL_SPI_SetUDRDetection(spi, LL_SPI_UDR_DETECT_END_DATA_FRAME);
#endif /* EC_HOST_CMD_ST_STM32H7 */

#ifdef EC_HOST_CMD_ST_STM32_FIFO
#ifdef EC_HOST_CMD_ST_STM32H7
	LL_SPI_SetFIFOThreshold(spi, LL_SPI_FIFO_TH_01DATA);
#else
	LL_SPI_SetRxFIFOThreshold(spi, LL_SPI_RX_FIFO_TH_QUARTER);
#endif /* EC_HOST_CMD_ST_STM32H7 */
#endif /* EC_HOST_CMD_ST_STM32_FIFO */

	return 0;
}

static int reload_dma_tx(struct ec_host_cmd_spi_ctx *hc_spi, size_t len)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	/* Set DMA at the beginning of the TX buffer and set the number of bytes to send */
	ret = dma_reload(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel, (uint32_t)hc_spi->tx_buf,
			 dma_dest_addr(spi), len);
	if (ret != 0) {
		return ret;
	}

	/* Start DMA transfer */
	ret = dma_start(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel);
	if (ret != 0) {
		return ret;
	}
#ifdef EC_HOST_CMD_ST_STM32H7
	LL_SPI_ClearFlag_UDR(spi);
#endif

	return 0;
}

static int spi_config_dma_tx(struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	struct dma_block_config *blk_cfg;
	struct dma_stream *stream = hc_spi->dma_tx;
	int ret;

	blk_cfg = &stream->dma_blk_cfg;

	/* Set configs for TX. This shouldn't be changed during communication */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = 0;

	/* The destination address is the SPI register */
	blk_cfg->dest_address = dma_dest_addr(spi);
	blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	blk_cfg->source_address = (uint32_t)hc_spi->tx_buf;
	blk_cfg->source_addr_adj = DMA_ADDR_ADJ_INCREMENT;

	blk_cfg->fifo_mode_control = hc_spi->dma_tx->fifo_threshold;

	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = hc_spi;

	/* Configure the TX channel */
	ret = dma_config(hc_spi->dma_tx->dma_dev, hc_spi->dma_tx->channel, &stream->dma_cfg);

	if (ret != 0) {
		return ret;
	}

	return 0;
}

static int reload_dma_rx(struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	/* Reload DMA to the beginning of the RX buffer */
	ret = dma_reload(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, dma_source_addr(spi),
			 (uint32_t)hc_spi->rx_ctx->buf, CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE);
	if (ret != 0) {
		return ret;
	}

	ret = dma_start(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel);
	if (ret != 0) {
		return ret;
	}

	return 0;
}

static int spi_config_dma_rx(struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	struct dma_block_config *blk_cfg;
	struct dma_stream *stream = hc_spi->dma_rx;
	int ret;

	blk_cfg = &stream->dma_blk_cfg;

	/* Set configs for RX. This shouldn't be changed during communication */
	memset(blk_cfg, 0, sizeof(struct dma_block_config));
	blk_cfg->block_size = CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE;

	/* The destination address is our RX buffer */
	blk_cfg->dest_address = (uint32_t)hc_spi->rx_ctx->buf;
	blk_cfg->dest_addr_adj = DMA_ADDR_ADJ_INCREMENT;

	/* The source address is the SPI register */
	blk_cfg->source_address = dma_source_addr(spi);
	blk_cfg->source_addr_adj = DMA_ADDR_ADJ_NO_CHANGE;

	blk_cfg->fifo_mode_control = hc_spi->dma_rx->fifo_threshold;

	stream->dma_cfg.head_block = blk_cfg;
	stream->dma_cfg.user_data = hc_spi;

	/* Configure the RX channel */
	ret = dma_config(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, &stream->dma_cfg);

	return ret;
}

static int prepare_rx(struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	hc_spi->prepare_rx_later = 0;

#ifdef EC_HOST_CMD_ST_STM32H7
	/* As described in RM0433 "To restart the internal state machine
	 * properly, SPI is strongly suggested to be disabled and re-enabled
	 * before next transaction starts despite its setting is not changed.",
	 * disable and re-enable the SPI module. Without that, the SPI module
	 * receives the first byte of the next transaction incorrectly - it is
	 * always 0x00.
	 * It also clears the RX FIFO, so there is no need to read the remaining
	 * bytes manually.
	 */
	LL_SPI_Disable(spi);
	LL_SPI_Enable(spi);
#else /* EC_HOST_CMD_ST_STM32H7 */
	/* Flush the RX buffer. It clears the RXNE (RX not empty) flag so it does not
	 * trigger the DMA transfer at the beginning of a new SPI transfer. The flag is
	 * set while sending the response to the host. The number of bytes to read can
	 * be bigger than 1 for chip families that need to bypass the DMA
	 * threshold.
	 */
	LL_SPI_ReceiveData8(spi);
#endif /* EC_HOST_CMD_ST_STM32H7 */

	ret = reload_dma_rx(hc_spi);
	if (!ret) {
		tx_status(spi, EC_SPI_RX_READY);
		hc_spi->state = SPI_HOST_CMD_STATE_READY_TO_RX;
	}

	return ret;
}

static int spi_setup_dma(struct ec_host_cmd_spi_ctx *hc_spi)
{
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	/* retrieve active RX DMA channel (used in callback) */
	int ret;

#ifdef EC_HOST_CMD_ST_STM32H7
	/* Set request before enabling (else SPI CFG1 reg is write protected) */
	LL_SPI_EnableDMAReq_RX(spi);
	LL_SPI_EnableDMAReq_TX(spi);

	LL_SPI_Enable(spi);
#else /* EC_HOST_CMD_ST_STM32H7 */
	LL_SPI_Enable(spi);
#endif /* !EC_HOST_CMD_ST_STM32H7 */

	ret = spi_config_dma_tx(hc_spi);
	if (ret != 0) {
		return ret;
	}

	ret = spi_config_dma_rx(hc_spi);
	if (ret != 0) {
		return ret;
	}

	/* Start receiving from the SPI Master */
	ret = dma_start(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel);
	if (ret != 0) {
		return ret;
	}

#ifndef EC_HOST_CMD_ST_STM32H7
	/* toggle the DMA request to restart the transfer */
	LL_SPI_EnableDMAReq_RX(spi);
	LL_SPI_EnableDMAReq_TX(spi);
#endif /* !EC_HOST_CMD_ST_STM32H7 */

	return 0;
}

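/* Poll the RX DMA status until at least `needed` bytes have been received. Returns 0 on success,
 * a positive value if chip select deasserts before enough bytes arrive (end of transfer), or a
 * negative error code on a DMA/GPIO failure or timeout.
 */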
static int wait_for_rx_bytes(struct ec_host_cmd_spi_ctx *hc_spi, int needed)
{
	int64_t deadline = k_ticks_to_us_floor64(k_uptime_ticks()) + EC_SPI_CMD_RX_TIMEOUT_US;
	int64_t current_time;
	struct dma_status stat;
	int ret;

	while (1) {
		uint32_t rx_bytes;

		current_time = k_ticks_to_us_floor64(k_uptime_ticks());

		ret = dma_get_status(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel, &stat);
		/* RX DMA is always programmed to copy buffer size (max command size) */
		if (ret) {
			return ret;
		}

		rx_bytes = CONFIG_EC_HOST_CMD_HANDLER_RX_BUFFER_SIZE - stat.pending_length;
		if (rx_bytes >= needed) {
			return 0;
		}

		/* Make sure the SPI transfer is ongoing */
		ret = gpio_pin_get(hc_spi->cs.port, hc_spi->cs.pin);
		if (ret) {
			/* End of transfer - return instantly */
			return ret;
		}

		if (current_time >= deadline) {
			/* Timeout */
			return -EIO;
		}
	}
}

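/* Callback on both edges of the chip-select line. On assertion it waits (via wait_for_rx_bytes())
 * for the request header and body, then notifies the host command handler. On deassertion it
 * releases the PM lock and prepares the SPI module and DMA for the next transaction, either
 * immediately or, if a command is still being processed, after the response has been sent.
 */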
void gpio_cb_nss(const struct device *port, struct gpio_callback *cb, gpio_port_pins_t pins)
{
	struct ec_host_cmd_spi_ctx *hc_spi =
		CONTAINER_OF(cb, struct ec_host_cmd_spi_ctx, cs_callback);
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;
	int ret;

	/* CS deasserted. Setup for the next transaction */
	if (gpio_pin_get(hc_spi->cs.port, hc_spi->cs.pin)) {
		ec_host_cmd_pm_policy_state_lock_put(hc_spi);

		/* CS deasserted while a command is still being processed. Prepare for
		 * receiving after sending the response.
		 */
		if (hc_spi->state == SPI_HOST_CMD_STATE_PROCESSING) {
			hc_spi->prepare_rx_later = 1;
			return;
		}

		ret = prepare_rx(hc_spi);
		if (ret) {
			LOG_ERR("Failed to prepare RX after CS deassertion");
		}

		return;
	}

	/* CS asserted. Receive full packet and call general handler */
	if (hc_spi->state == SPI_HOST_CMD_STATE_READY_TO_RX) {
		/* The SPI module and DMA are already configured and ready to receive data.
		 * Consider disabling the SPI module at the end of sending the response and
		 * re-enabling it here if the SPI module has to be reset because of
		 * unexpected states.
		 */
		int exp_size;

		hc_spi->state = SPI_HOST_CMD_STATE_RECEIVING;
		/* Don't allow system to suspend until the end of transfer. */
		ec_host_cmd_pm_policy_state_lock_get(hc_spi);

		/* Set TX register to send status */
		tx_status(spi, EC_SPI_RECEIVING);

		/* Get the header */
		if (wait_for_rx_bytes(hc_spi, RX_HEADER_SIZE)) {
			goto spi_bad_rx;
		}

		exp_size = expected_size((struct ec_host_cmd_request_header *)hc_spi->rx_ctx->buf);
		/* Get data bytes */
		if (exp_size > RX_HEADER_SIZE) {
			if (wait_for_rx_bytes(hc_spi, exp_size)) {
				goto spi_bad_rx;
			}
		}

		hc_spi->rx_ctx->len = exp_size;
		hc_spi->state = SPI_HOST_CMD_STATE_PROCESSING;
		tx_status(spi, EC_SPI_PROCESSING);
		ec_host_cmd_rx_notify();

		return;
	}

spi_bad_rx:
	tx_status(spi, EC_SPI_NOT_READY);
	hc_spi->state = SPI_HOST_CMD_STATE_RX_BAD;
}

static int ec_host_cmd_spi_init(const struct ec_host_cmd_backend *backend,
				struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
{
	int ret;
	struct ec_host_cmd_spi_ctx *hc_spi = (struct ec_host_cmd_spi_ctx *)backend->ctx;
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	SPI_TypeDef *spi = cfg->spi;

	hc_spi->state = SPI_HOST_CMD_STATE_DISABLED;

	/* SPI backend needs rx and tx buffers provided by the handler */
	if (!rx_ctx->buf || !tx->buf || !hc_spi->cs.port) {
		return -EIO;
	}

	hc_spi->rx_ctx = rx_ctx;
	hc_spi->rx_ctx->len = 0;

	/* Buffer to transmit */
	hc_spi->tx_buf = tx->buf;
	hc_spi->tx = tx;
	/* Buffer for response from HC handler. Make space for preamble */
	hc_spi->tx->buf = (uint8_t *)hc_spi->tx->buf + sizeof(out_preamble);
	hc_spi->tx->len_max = hc_spi->tx->len_max - sizeof(out_preamble) - EC_SPI_PAST_END_LENGTH;

	/* Limit the request/response max sizes */
	if (hc_spi->rx_ctx->len_max > SPI_MAX_REQ_SIZE) {
		hc_spi->rx_ctx->len_max = SPI_MAX_REQ_SIZE;
	}
	if (hc_spi->tx->len_max > SPI_MAX_RESP_SIZE) {
		hc_spi->tx->len_max = SPI_MAX_RESP_SIZE;
	}

	ret = spi_init(hc_spi);
	if (ret) {
		return ret;
	}

	ret = spi_configure(hc_spi);
	if (ret) {
		return ret;
	}

	ret = spi_setup_dma(hc_spi);
	if (ret) {
		return ret;
	}

	tx_status(spi, EC_SPI_RX_READY);
	hc_spi->state = SPI_HOST_CMD_STATE_READY_TO_RX;

	/* Configure CS interrupt once everything is ready. */
	gpio_init_callback(&hc_spi->cs_callback, gpio_cb_nss, BIT(hc_spi->cs.pin));
	gpio_add_callback(hc_spi->cs.port, &hc_spi->cs_callback);
	gpio_pin_interrupt_configure(hc_spi->cs.port, hc_spi->cs.pin, GPIO_INT_EDGE_BOTH);

	return ret;
}

static int ec_host_cmd_spi_send(const struct ec_host_cmd_backend *backend)
{
	struct ec_host_cmd_spi_ctx *hc_spi = (struct ec_host_cmd_spi_ctx *)backend->ctx;
	int ret = 0;
	int tx_size;

	dma_stop(hc_spi->dma_rx->dma_dev, hc_spi->dma_rx->channel);

	/* Add state bytes at the beginning and the end of the buffer to transmit */
	memcpy(hc_spi->tx_buf, out_preamble, sizeof(out_preamble));
	for (int i = 0; i < EC_SPI_PAST_END_LENGTH; i++) {
		hc_spi->tx_buf[sizeof(out_preamble) + hc_spi->tx->len + i] = EC_SPI_PAST_END;
	}
	tx_size = hc_spi->tx->len + sizeof(out_preamble) + EC_SPI_PAST_END_LENGTH;

	hc_spi->state = SPI_HOST_CMD_STATE_SENDING;

	ret = reload_dma_tx(hc_spi, tx_size);
	if (ret) {
		LOG_ERR("Failed to send response");
	}

	return ret;
}

static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
	.init = &ec_host_cmd_spi_init,
	.send = &ec_host_cmd_spi_send,
};

EC_HOST_CMD_SPI_DEFINE(ec_host_cmd_spi);

struct ec_host_cmd_backend *ec_host_cmd_backend_get_spi(struct gpio_dt_spec *cs)
{
	struct ec_host_cmd_spi_ctx *hc_spi = ec_host_cmd_spi.ctx;

	hc_spi->cs = *cs;

	return &ec_host_cmd_spi;
}

#ifdef CONFIG_PM_DEVICE
static int ec_host_cmd_spi_stm32_pm_action(const struct device *dev, enum pm_device_action action)
{
	const struct ec_host_cmd_backend *backend = (struct ec_host_cmd_backend *)dev->data;
	struct ec_host_cmd_spi_ctx *hc_spi = (struct ec_host_cmd_spi_ctx *)backend->ctx;
	const struct ec_host_cmd_spi_cfg *cfg = hc_spi->spi_config;
	int err;

	switch (action) {
	case PM_DEVICE_ACTION_RESUME:
		/* Set pins to active state */
		err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
		if (err < 0) {
			return err;
		}

		/* Enable device clock */
		err = clock_control_on(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
				       (clock_control_subsys_t)&cfg->pclken[0]);
		if (err < 0) {
			return err;
		}
		/* Enable CS interrupts. */
		if (hc_spi->cs.port) {
			gpio_pin_interrupt_configure_dt(&hc_spi->cs, GPIO_INT_EDGE_BOTH);
		}

		break;
	case PM_DEVICE_ACTION_SUSPEND:
#ifdef SPI_SR_BSY
		/* Wait 10 ms for the end of transaction to prevent corruption of the last
		 * transfer
		 */
		WAIT_FOR((LL_SPI_IsActiveFlag_BSY(cfg->spi) == 0), 10 * USEC_PER_MSEC, NULL);
#endif
		/* Disable unnecessary interrupts. */
		if (hc_spi->cs.port) {
			gpio_pin_interrupt_configure_dt(&hc_spi->cs, GPIO_INT_DISABLE);
		}

		/* Stop device clock. */
		err = clock_control_off(DEVICE_DT_GET(STM32_CLOCK_CONTROL_NODE),
					(clock_control_subsys_t)&cfg->pclken[0]);
		if (err != 0) {
			return err;
		}

		/* Move pins to sleep state */
		err = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_SLEEP);
		if ((err < 0) && (err != -ENOENT)) {
			/* If returning -ENOENT, no pins were defined for sleep mode. */
			return err;
		}

		break;
	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif /* CONFIG_PM_DEVICE */

PM_DEVICE_DT_DEFINE(DT_CHOSEN(zephyr_host_cmd_spi_backend), ec_host_cmd_spi_stm32_pm_action);

DEVICE_DT_DEFINE(DT_CHOSEN(zephyr_host_cmd_spi_backend), NULL,
		 PM_DEVICE_DT_GET(DT_CHOSEN(zephyr_host_cmd_spi_backend)), &ec_host_cmd_spi, NULL,
		 PRE_KERNEL_1, CONFIG_EC_HOST_CMD_INIT_PRIORITY, NULL);

#ifdef CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT
static int host_cmd_init(void)
{
	struct gpio_dt_spec cs = GPIO_DT_SPEC_GET(DT_CHOSEN(zephyr_host_cmd_spi_backend), cs_gpios);

	ec_host_cmd_init(ec_host_cmd_backend_get_spi(&cs));

	return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);

#endif