1 /*
2 * Copyright (c) 2022 Google LLC
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include "ec_host_cmd_backend_shi.h"
8
9 #include <zephyr/drivers/clock_control.h>
10 #include <zephyr/drivers/pinctrl.h>
11 #include <zephyr/logging/log.h>
12 #include <zephyr/mgmt/ec_host_cmd/backend.h>
13 #include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
14 #include <zephyr/pm/device.h>
15 #include <zephyr/pm/device_runtime.h>
16 #include <zephyr/pm/policy.h>
17
18 #include <soc_miwu.h>
19
20 #if DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi)
21 #define DT_DRV_COMPAT nuvoton_npcx_shi
22 #elif DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi_enhanced)
23 #define DT_DRV_COMPAT nuvoton_npcx_shi_enhanced
24 #endif
25 BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "Invalid number of NPCX SHI peripherals");
26 BUILD_ASSERT(!(DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi) &&
27 DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi_enhanced)));
28
29 LOG_MODULE_REGISTER(host_cmd_shi_npcx, CONFIG_EC_HC_LOG_LEVEL);
30
31 /* Driver convenience defines */
32 #define HAL_INSTANCE(dev) (struct shi_reg *)(((const struct shi_npcx_config *)(dev)->config)->base)
33
34 /* Full output buffer size */
35 #define SHI_OBUF_FULL_SIZE DT_INST_PROP(0, buffer_tx_size)
36 /* Full input buffer size */
37 #define SHI_IBUF_FULL_SIZE DT_INST_PROP(0, buffer_rx_size)
38 /* Configure the IBUFLVL2 = the size of V3 protocol header */
39 #define SHI_IBUFLVL2_THRESHOLD (sizeof(struct ec_host_cmd_request_header))
40 /* Half output buffer size */
41 #define SHI_OBUF_HALF_SIZE (SHI_OBUF_FULL_SIZE / 2)
42 /* Half input buffer size */
43 #define SHI_IBUF_HALF_SIZE (SHI_IBUF_FULL_SIZE / 2)
44
45 /*
46 * Timeout to wait for SHI request packet
47 *
48 * This affects the slowest SPI clock we can support. A delay of 8192 us permits a 512-byte request
49 * at 500 KHz, assuming the SPI controller starts sending bytes as soon as it asserts chip select.
50 * That's as slow as we would practically want to run the SHI interface, since running it slower
51 * significantly impacts firmware update times.
52 */
53 #define EC_SHI_CMD_RX_TIMEOUT_US 8192
54
55 /*
56 * The AP blindly clocks back bytes over the SPI interface looking for a framing byte.
57 * So this preamble must always precede the actual response packet.
58 */
59 #define EC_SHI_OUT_PREAMBLE_LENGTH 2
60
61 /*
62 * Space allocation of the past-end status byte (EC_SHI_PAST_END) in the out_msg buffer.
63 */
64 #define EC_SHI_PAST_END_LENGTH 1
65
66 /*
67 * Space allocation of the frame status byte (EC_SHI_FRAME_START) in the out_msg buffer.
68 */
69 #define EC_SHI_FRAME_START_LENGTH 1
70
71 /*
72 * Offset of output parameters needs to account for pad and framing bytes and
73 * one last past-end byte at the end so any additional bytes clocked out by
74 * the AP will have a known and identifiable value.
75 */
76 #define EC_SHI_PROTO3_OVERHEAD (EC_SHI_PAST_END_LENGTH + EC_SHI_FRAME_START_LENGTH)
77
78 /*
79 * Our input and output msg buffers. These must be large enough for our largest
80 * message, including protocol overhead. The pointers after the protocol
81 * overhead, as passed to the host command handler, must be 32-bit aligned.
82 */
83 #define SHI_OUT_START_PAD (4 * (EC_SHI_FRAME_START_LENGTH / 4 + 1))
84 #define SHI_OUT_END_PAD (4 * (EC_SHI_PAST_END_LENGTH / 4 + 1))
85
/* States of the SHI transaction state machine (tracked in shi_npcx_data::state). */
enum shi_npcx_state {
	/* No state info; used as the initial value of last_error_state */
	SHI_STATE_NONE = -1,
	/* SHI not enabled (initial state, and when chipset is off) */
	SHI_STATE_DISABLED = 0,
	/* Ready to receive next request */
	SHI_STATE_READY_TO_RECV,
	/* Receiving request */
	SHI_STATE_RECEIVING,
	/* Processing request */
	SHI_STATE_PROCESSING,
	/* Canceling response since CS deasserted and output NOT_READY byte */
	SHI_STATE_CNL_RESP_NOT_RDY,
	/* Sending response */
	SHI_STATE_SENDING,
	/* Received data is invalid */
	SHI_STATE_BAD_RECEIVED_DATA,
};
103
/* Bit positions in shi_npcx_data::pm_policy_state_flag (PM deep-sleep lock tracking). */
enum shi_npcx_pm_policy_state_flag {
	/* Lock held while a SHI transaction is in progress */
	SHI_NPCX_PM_POLICY_FLAG,
	/* Number of flags; sizes the ATOMIC_DEFINE bit array */
	SHI_NPCX_PM_POLICY_FLAG_COUNT,
};
108
109 /* Device config */
/* Device config (constant, from devicetree) */
struct shi_npcx_config {
	/* Serial Host Interface (SHI) base address */
	uintptr_t base;
	/* Clock configuration */
	struct npcx_clk_cfg clk_cfg;
	/* Pin control configuration */
	const struct pinctrl_dev_config *pcfg;
	/* Chip-select interrupts */
	int irq;
	/* Wake-Up Input (MIWU) used to detect SHI_CS edges */
	struct npcx_wui shi_cs_wui;
};
121
/* Driver run-time data */
struct shi_npcx_data {
	/* RX context shared with the EC host command subsystem */
	struct ec_host_cmd_rx_ctx *rx_ctx;
	/* TX buffer descriptor provided by the EC host command subsystem */
	struct ec_host_cmd_tx_buf *tx;
	/* Communication status */
	enum shi_npcx_state state;
	/* Last state reported by shi_npcx_log_unexpected_state (spam suppression) */
	enum shi_npcx_state last_error_state;
	uint8_t *rx_msg;          /* Entry pointer of msg rx buffer */
	uint8_t *tx_msg;          /* Entry pointer of msg tx buffer */
	volatile uint8_t *rx_buf; /* Entry pointer of receive buffer */
	volatile uint8_t *tx_buf; /* Entry pointer of transmit buffer */
	uint16_t sz_sending;      /* Size of sending data in bytes */
	uint16_t sz_request;      /* Request bytes need to receive */
	uint16_t sz_response;     /* Response bytes need to receive */
	uint64_t rx_deadline;     /* Deadline of receiving (in system cycles) */
	/* Buffers; padding keeps the post-framing payload 32-bit aligned */
	uint8_t out_msg_padded[SHI_OUT_START_PAD + CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_RESPONSE +
			       SHI_OUT_END_PAD] __aligned(4);
	/* Points into out_msg_padded at the frame-start byte */
	uint8_t *const out_msg;
	uint8_t in_msg[CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_REQUEST] __aligned(4);
	ATOMIC_DEFINE(pm_policy_state_flag, SHI_NPCX_PM_POLICY_FLAG_COUNT);
#ifdef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
	struct miwu_callback shi_cs_wui_cb;
	/*
	 * With the workaround, CS assertion/de-assertion INT and SHI module's INT come from
	 * different sources. CS failing IRQ and IBHF2 IRQ may happen at the same time.
	 * In this case, IBHF2 ISR is called first because it has lower INT number.
	 * (with the same priority). This flag is used to guarantee CS assertion ISR is executed
	 * first.
	 */
	bool is_entered_cs_asserted_wui_isr;
#endif
};
154
/* Backend context stored in struct ec_host_cmd_backend::ctx */
struct ec_host_cmd_shi_npcx_ctx {
	/* SHI device instance */
	const struct device *dev;
};
159
/*
 * Define a SHI backend instance: allocates its private context and binds it
 * to the common ec_host_cmd_backend API table.
 */
#define EC_HOST_CMD_SHI_NPCX_DEFINE(_name)                                                         \
	static struct ec_host_cmd_shi_npcx_ctx _name##_hc_shi_npcx;                                \
	struct ec_host_cmd_backend _name = {                                                       \
		.api = &ec_host_cmd_api,                                                           \
		.ctx = (struct ec_host_cmd_shi_npcx_ctx *)&_name##_hc_shi_npcx,                    \
	}
166
167 /* Forward declaration */
168 static void shi_npcx_reset_prepare(const struct device *dev);
169
shi_npcx_pm_policy_state_lock_get(struct shi_npcx_data * data,enum shi_npcx_pm_policy_state_flag flag)170 static void shi_npcx_pm_policy_state_lock_get(struct shi_npcx_data *data,
171 enum shi_npcx_pm_policy_state_flag flag)
172 {
173 if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
174 pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
175 }
176 }
177
shi_npcx_pm_policy_state_lock_put(struct shi_npcx_data * data,enum shi_npcx_pm_policy_state_flag flag)178 static void shi_npcx_pm_policy_state_lock_put(struct shi_npcx_data *data,
179 enum shi_npcx_pm_policy_state_flag flag)
180 {
181 if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
182 pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
183 }
184 }
185
/* Read pointer of input or output buffer by two consecutive equal reads */
shi_npcx_read_buf_pointer(struct shi_reg * const inst)187 static uint32_t shi_npcx_read_buf_pointer(struct shi_reg *const inst)
188 {
189 uint8_t stat;
190
191 /* Wait for two consecutive equal values read */
192 do {
193 stat = inst->IBUFSTAT;
194 } while (stat != inst->IBUFSTAT);
195
196 return (uint32_t)stat;
197 }
198
199 /*
200 * Write pointer of output buffer by consecutive reading
201 * Note: this function (OBUFSTAT) should only be usd in Enhanced Buffer Mode.
202 */
shi_npcx_write_buf_pointer(struct shi_reg * const inst)203 static uint32_t shi_npcx_write_buf_pointer(struct shi_reg *const inst)
204 {
205 uint8_t stat;
206
207 /* Wait for two consecutive equal values are read */
208 do {
209 stat = inst->OBUFSTAT;
210 } while (stat != inst->OBUFSTAT);
211
212 return stat;
213 }
214
215 /*
216 * Valid offset of SHI output buffer to write.
217 * - In Simultaneous Standard FIFO Mode (SIMUL = 1 and EBUFMD = 0):
218 * OBUFPTR cannot be used. IBUFPTR can be used instead because it points to
219 * the same location as OBUFPTR.
220 * - In Simultaneous Enhanced FIFO Mode (SIMUL = 1 and EBUFMD = 1):
221 * IBUFPTR may not point to the same location as OBUFPTR.
222 * In this case OBUFPTR reflects the 128-byte payload buffer pointer only
223 * during the SPI transaction.
224 */
shi_npcx_valid_obuf_offset(struct shi_reg * const inst)225 static uint32_t shi_npcx_valid_obuf_offset(struct shi_reg *const inst)
226 {
227 if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
228 return shi_npcx_write_buf_pointer(inst) % SHI_OBUF_FULL_SIZE;
229 } else {
230 return (shi_npcx_read_buf_pointer(inst) + EC_SHI_OUT_PREAMBLE_LENGTH) %
231 SHI_OBUF_FULL_SIZE;
232 }
233 }
234
235 /*
236 * This routine write SHI next half output buffer from msg buffer
237 */
shi_npcx_write_half_outbuf(const struct device * dev)238 static void shi_npcx_write_half_outbuf(const struct device *dev)
239 {
240 struct shi_npcx_data *data = dev->data;
241
242 const uint32_t size = MIN(SHI_OBUF_HALF_SIZE, data->sz_response - data->sz_sending);
243 uint8_t *obuf_ptr = (uint8_t *)data->tx_buf;
244 const uint8_t *obuf_end = obuf_ptr + size;
245 uint8_t *msg_ptr = data->tx_msg;
246
247 /* Fill half output buffer */
248 while (obuf_ptr != obuf_end) {
249 *obuf_ptr++ = *msg_ptr++;
250 }
251
252 data->sz_sending += size;
253 data->tx_buf = obuf_ptr;
254 data->tx_msg = msg_ptr;
255 }
256
257 /*
258 * This routine read SHI input buffer to msg buffer until
259 * we have received a certain number of bytes
260 */
static int shi_npcx_read_inbuf_wait(const struct device *dev, uint32_t szbytes)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);

	/* Copy data to msg buffer from input buffer */
	for (uint32_t i = 0; i < szbytes; i++, data->rx_ctx->len++) {
		/*
		 * If input buffer pointer equals pointer which wants to read,
		 * it means data is not ready.
		 */
		while (data->rx_buf == inst->IBUF + shi_npcx_read_buf_pointer(inst)) {
			/* Give up (return 0) if the host stalls past rx_deadline. */
			if (k_cycle_get_64() >= data->rx_deadline) {
				return 0;
			}
		}

		/* Copy data to msg buffer */
		*data->rx_msg++ = *data->rx_buf++;
	}
	/* All requested bytes received */
	return 1;
}
283
284 /* This routine fills out all SHI output buffer with status byte */
static void shi_npcx_fill_out_status(struct shi_reg *const inst, uint8_t status)
{
	uint8_t start, end;
	volatile uint8_t *fill_ptr;
	volatile uint8_t *fill_end;
	volatile uint8_t *obuf_end;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/*
		 * In Enhanced Buffer Mode, SHI module outputs the status code
		 * in SBOBUF repeatedly.
		 */
		inst->SBOBUF = status;

		return;
	}

	/*
	 * Disable interrupts in case the interfere by the other interrupts.
	 * Use __disable_irq/__enable_irq instead of using irq_lock/irq_unlock
	 * here because irq_lock/irq_unlock leave some system exceptions (like
	 * SVC, NMI, and faults) still enabled.
	 */
	__disable_irq();

	/*
	 * Fill out output buffer with status byte and leave a gap for PREAMBLE.
	 * The gap guarantees the synchronization. The critical section should
	 * be done within this gap. No racing happens.
	 */
	start = shi_npcx_valid_obuf_offset(inst);
	end = (start + SHI_OBUF_FULL_SIZE - EC_SHI_OUT_PREAMBLE_LENGTH) % SHI_OBUF_FULL_SIZE;

	fill_ptr = inst->OBUF + start;
	fill_end = inst->OBUF + end;
	obuf_end = inst->OBUF + SHI_OBUF_FULL_SIZE;
	while (fill_ptr != fill_end) {
		*fill_ptr++ = status;
		/* OBUF is circular: wrap back to its bottom when passing the top */
		if (fill_ptr == obuf_end) {
			fill_ptr = inst->OBUF;
		}
	}

	/* End of critical section */
	__enable_irq();
}
331
332 /* This routine handles shi received unexpected data */
static void shi_npcx_bad_received_data(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/* Stop input-buffer half/full events while recovering */
		inst->EVENABLE &= ~IBF_IBHF_EN_MASK;
	}

	/* State machine mismatch, timeout, or protocol we can't handle. */
	shi_npcx_fill_out_status(inst, EC_SHI_RX_BAD_DATA);
	data->state = SHI_STATE_BAD_RECEIVED_DATA;

	LOG_ERR("SHI bad data recv");
	LOG_DBG("BAD-");
	LOG_HEXDUMP_DBG(data->in_msg, data->rx_ctx->len, "in_msg=");

	/* Reset shi's state machine for error recovery */
	shi_npcx_reset_prepare(dev);

	LOG_DBG("END");
}
355
356 /*
357 * This routine write SHI output buffer from msg buffer over halt of it.
358 * It make sure we have enough time to handle next operations.
359 */
static void shi_npcx_write_first_pkg_outbuf(const struct device *dev, uint16_t szbytes)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	/*
	 * Widened from uint8_t: a half-buffer chunk or an OBUF offset may not
	 * fit in 8 bits if the devicetree buffer-tx-size exceeds 256 bytes.
	 */
	uint32_t size, offset;
	volatile uint8_t *obuf_ptr;
	volatile uint8_t *obuf_end;
	uint8_t *msg_ptr;
	uint32_t half_buf_remain; /* Remains in half buffer are free to write */

	/* Start writing at our current OBUF position */
	offset = shi_npcx_valid_obuf_offset(inst);
	obuf_ptr = inst->OBUF + offset;
	msg_ptr = data->tx_msg;

	/* Fill up to OBUF mid point, or OBUF end */
	half_buf_remain = SHI_OBUF_HALF_SIZE - (offset % SHI_OBUF_HALF_SIZE);
	size = MIN(half_buf_remain, szbytes - data->sz_sending);
	obuf_end = obuf_ptr + size;
	while (obuf_ptr != obuf_end) {
		*obuf_ptr++ = *msg_ptr++;
	}

	/* Track bytes sent for later accounting */
	data->sz_sending += size;

	/*
	 * Write data to beginning of OBUF if we've reached the end.
	 * Fix: the wrap check must use the OUTPUT buffer size; it previously
	 * compared against SHI_IBUF_FULL_SIZE, which is only correct when the
	 * rx/tx buffer sizes happen to be equal.
	 */
	if (obuf_ptr == inst->OBUF + SHI_OBUF_FULL_SIZE) {
		obuf_ptr = inst->OBUF;
	}

	/* Fill next half output buffer */
	size = MIN(SHI_OBUF_HALF_SIZE, szbytes - data->sz_sending);
	obuf_end = obuf_ptr + size;
	while (obuf_ptr != obuf_end) {
		*obuf_ptr++ = *msg_ptr++;
	}

	/* Track bytes sent / last OBUF position written for later accounting */
	data->sz_sending += size;
	data->tx_buf = obuf_ptr;
	data->tx_msg = msg_ptr;
}
403
shi_npcx_handle_host_package(const struct device * dev)404 static void shi_npcx_handle_host_package(const struct device *dev)
405 {
406 struct shi_npcx_data *data = dev->data;
407 struct shi_reg *const inst = HAL_INSTANCE(dev);
408 uint32_t sz_inbuf_int = data->sz_request / SHI_IBUF_HALF_SIZE;
409 uint32_t cnt_inbuf_int = data->rx_ctx->len / SHI_IBUF_HALF_SIZE;
410
411 if (sz_inbuf_int - cnt_inbuf_int) {
412 /* Need to receive data from buffer */
413 return;
414 }
415
416 uint32_t remain_bytes = data->sz_request - data->rx_ctx->len;
417
418 /* Read remaining bytes from input buffer */
419 if (!shi_npcx_read_inbuf_wait(dev, remain_bytes)) {
420 shi_npcx_bad_received_data(dev);
421 return;
422 }
423
424 /* Move to processing state */
425 data->state = SHI_STATE_PROCESSING;
426 LOG_DBG("PRC-");
427
428 if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
429 inst->EVENABLE &= ~IBF_IBHF_EN_MASK;
430 }
431
432 /* Fill output buffer to indicate we`re processing request */
433 shi_npcx_fill_out_status(inst, EC_SHI_PROCESSING);
434 data->out_msg[0] = EC_SHI_FRAME_START;
435
436 /* Wake-up the HC handler thread */
437 ec_host_cmd_rx_notify();
438 }
439
shi_npcx_host_request_expected_size(const struct ec_host_cmd_request_header * r)440 static int shi_npcx_host_request_expected_size(const struct ec_host_cmd_request_header *r)
441 {
442 /* Check host request version */
443 if (r->prtcl_ver != EC_HOST_REQUEST_VERSION) {
444 return 0;
445 }
446
447 /* Reserved byte should be 0 */
448 if (r->reserved) {
449 return 0;
450 }
451
452 return sizeof(*r) + r->data_len;
453 }
454
static void shi_npcx_parse_header(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;

	/* We're now inside a transaction */
	data->state = SHI_STATE_RECEIVING;
	LOG_DBG("RV-");

	/* Setup deadline time for receiving */
	data->rx_deadline = k_cycle_get_64() + k_us_to_cyc_near64(EC_SHI_CMD_RX_TIMEOUT_US);

	/* Wait for version, command, length bytes */
	if (!shi_npcx_read_inbuf_wait(dev, 3)) {
		shi_npcx_bad_received_data(dev);
		return;
	}

	if (data->in_msg[0] == EC_HOST_REQUEST_VERSION) {
		/* Protocol version 3 */
		struct ec_host_cmd_request_header *r =
			(struct ec_host_cmd_request_header *)data->in_msg;
		int pkt_size;

		/*
		 * If request is over half of input buffer, we need to modify the algorithm again.
		 */
		__ASSERT_NO_MSG(sizeof(*r) < SHI_IBUF_HALF_SIZE);

		/* Wait for the rest of the command header (3 bytes were read above) */
		if (!shi_npcx_read_inbuf_wait(dev, sizeof(*r) - 3)) {
			shi_npcx_bad_received_data(dev);
			return;
		}

		/* Check how big the packet should be */
		pkt_size = shi_npcx_host_request_expected_size(r);
		if (pkt_size == 0 || pkt_size > sizeof(data->in_msg)) {
			shi_npcx_bad_received_data(dev);
			return;
		}

		/* Computing total bytes need to receive */
		data->sz_request = pkt_size;

		shi_npcx_handle_host_package(dev);
	} else {
		/* Invalid version number */
		shi_npcx_bad_received_data(dev);
		return;
	}
}
506
shi_npcx_sec_ibf_int_enable(struct shi_reg * const inst,int enable)507 static void shi_npcx_sec_ibf_int_enable(struct shi_reg *const inst, int enable)
508 {
509 if (enable) {
510 /* Setup IBUFLVL2 threshold and enable it */
511 inst->SHICFG5 |= BIT(NPCX_SHICFG5_IBUFLVL2DIS);
512 SET_FIELD(inst->SHICFG5, NPCX_SHICFG5_IBUFLVL2, SHI_IBUFLVL2_THRESHOLD);
513 inst->SHICFG5 &= ~BIT(NPCX_SHICFG5_IBUFLVL2DIS);
514
515 /* Enable IBHF2 event */
516 inst->EVENABLE2 |= BIT(NPCX_EVENABLE2_IBHF2EN);
517 } else {
518 /* Disable IBHF2 event first */
519 inst->EVENABLE2 &= ~BIT(NPCX_EVENABLE2_IBHF2EN);
520
521 /* Disable IBUFLVL2 and set threshold back to zero */
522 inst->SHICFG5 |= BIT(NPCX_SHICFG5_IBUFLVL2DIS);
523 SET_FIELD(inst->SHICFG5, NPCX_SHICFG5_IBUFLVL2, 0);
524 }
525 }
526
527 /* This routine copies SHI half input buffer data to msg buffer */
static void shi_npcx_read_half_inbuf(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;

	/*
	 * Copy to read buffer until reaching middle/top address of
	 * input buffer or completing receiving data
	 */
	do {
		/* Restore data to msg buffer */
		*data->rx_msg++ = *data->rx_buf++;
		data->rx_ctx->len++;
		/* len % SHI_IBUF_HALF_SIZE hits 0 exactly at a half/full boundary */
	} while (data->rx_ctx->len % SHI_IBUF_HALF_SIZE && data->rx_ctx->len != data->sz_request);
}
542
543 /*
544 * Avoid spamming the console with prints every IBF / IBHF interrupt, if
545 * we find ourselves in an unexpected state.
546 */
shi_npcx_log_unexpected_state(const struct device * dev,char * isr_name)547 static void shi_npcx_log_unexpected_state(const struct device *dev, char *isr_name)
548 {
549 struct shi_npcx_data *data = dev->data;
550
551 if (data->state != data->last_error_state) {
552 LOG_ERR("Unexpected state %d in %s ISR", data->state, isr_name);
553 }
554
555 data->last_error_state = data->state;
556 }
557
/* Handle SHI_CS falling edge: begin a transaction if we are ready for one. */
static void shi_npcx_handle_cs_assert(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;

	/* If not enabled, ignore glitches on SHI_CS_L */
	if (data->state == SHI_STATE_DISABLED) {
		return;
	}

	/* NOT_READY was already sent; no SPI transaction should start now. */
	if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		return;
	}

	/* Chip select is low = asserted */
	if (data->state != SHI_STATE_READY_TO_RECV) {
		/* State machine should be reset in EVSTAT_EOR ISR */
		LOG_ERR("Unexpected state %d in CS ISR", data->state);
		return;
	}

	LOG_DBG("CSL-");

	/*
	 * Clear possible EOR event from previous transaction since it's
	 * irrelevant now that CS is re-asserted.
	 */
	inst->EVSTAT = BIT(NPCX_EVSTAT_EOR);

	/* Keep the chip out of deep sleep for the duration of the transaction */
	shi_npcx_pm_policy_state_lock_get(data, SHI_NPCX_PM_POLICY_FLAG);
}
590
/* Handle SHI_CS rising edge: the host completed or aborted the transaction. */
static void shi_npcx_handle_cs_deassert(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;

	/*
	 * If the buffer is still used by the host command.
	 * Change state machine for response handler.
	 */
	if (data->state == SHI_STATE_PROCESSING) {
		/*
		 * Mark not ready to prevent the other
		 * transaction immediately
		 */
		shi_npcx_fill_out_status(inst, EC_SHI_NOT_READY);

		data->state = SHI_STATE_CNL_RESP_NOT_RDY;

		/*
		 * Disable SHI interrupt, it will remain disabled until shi_send_response_packet()
		 * is called and CS is asserted for a new transaction.
		 */
		irq_disable(DT_INST_IRQN(0));

		LOG_DBG("CNL-");
		return;
	/* Next transaction but we're not ready */
	} else if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		return;
	}

	/* Error state for checking */
	if (data->state != SHI_STATE_SENDING) {
		shi_npcx_log_unexpected_state(dev, "CS DE-AST");
	}

	/* Reset SHI and prepare for the next transaction */
	shi_npcx_reset_prepare(dev);
	LOG_DBG("END\n");
}
631
shi_npcx_handle_input_buf_half_full(const struct device * dev)632 static void shi_npcx_handle_input_buf_half_full(const struct device *dev)
633 {
634 struct shi_reg *const inst = HAL_INSTANCE(dev);
635 struct shi_npcx_data *data = dev->data;
636
637 if (data->state == SHI_STATE_RECEIVING) {
638 /* Read data from input to msg buffer */
639 shi_npcx_read_half_inbuf(dev);
640 shi_npcx_handle_host_package(dev);
641 return;
642 } else if (data->state == SHI_STATE_SENDING) {
643 /* Write data from msg buffer to output buffer */
644 if (data->tx_buf == inst->OBUF + SHI_OBUF_FULL_SIZE) {
645 /* Write data from bottom address again */
646 data->tx_buf = inst->OBUF;
647 shi_npcx_write_half_outbuf(dev);
648 }
649 } else if (data->state == SHI_STATE_PROCESSING) {
650 /* Wait for host to handle request */
651 } else {
652 /* Unexpected status */
653 shi_npcx_log_unexpected_state(dev, "IBHF");
654 }
655 }
656
shi_npcx_handle_input_buf_full(const struct device * dev)657 static void shi_npcx_handle_input_buf_full(const struct device *dev)
658 {
659 struct shi_npcx_data *data = dev->data;
660 struct shi_reg *const inst = HAL_INSTANCE(dev);
661
662 if (data->state == SHI_STATE_RECEIVING) {
663 /* read data from input to msg buffer */
664 shi_npcx_read_half_inbuf(dev);
665 /* Read to bottom address again */
666 data->rx_buf = inst->IBUF;
667 shi_npcx_handle_host_package(dev);
668 return;
669 } else if (data->state == SHI_STATE_SENDING) {
670 /* Write data from msg buffer to output buffer */
671 if (data->tx_buf == inst->OBUF + SHI_OBUF_HALF_SIZE) {
672 shi_npcx_write_half_outbuf(dev);
673 }
674
675 return;
676 } else if (data->state == SHI_STATE_PROCESSING) {
677 /* Wait for host to handle request */
678 return;
679 }
680
681 /* Unexpected status */
682 shi_npcx_log_unexpected_state(dev, "IBF");
683 }
684
/* Main SHI interrupt service routine: dispatches CS edge and buffer-level events. */
static void shi_npcx_isr(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	uint8_t stat;
	uint8_t stat2;

#ifdef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
	struct shi_npcx_data *data = dev->data;

	/* Defer to the CS WUI ISR: CS assertion handling must run first. */
	if (data->is_entered_cs_asserted_wui_isr != true) {
		return;
	}
#endif

	/* Read status register and clear interrupt status early */
	stat = inst->EVSTAT;
	inst->EVSTAT = stat;
	stat2 = inst->EVSTAT2;

#ifndef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
	/* SHI CS pin is asserted in EVSTAT2 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_CSNFE)) {
		/* Clear pending bit of CSNFE */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_CSNFE);
		LOG_DBG("CSNFE-");

		/*
		 * BUSY bit is set when SHI_CS is asserted. If not, leave it for
		 * SHI_CS de-asserted event.
		 */
		if (!IS_BIT_SET(inst->SHICFG2, NPCX_SHICFG2_BUSY)) {
			LOG_DBG("CSNB-");
			return;
		}

		shi_npcx_handle_cs_assert(dev);
	}
	/*
	 * End of data for read/write transaction. i.e. SHI_CS is deasserted.
	 * Host completed or aborted transaction
	 *
	 * EOR has the limitation that it will not be set even if the SHI_CS is deasserted without
	 * SPI clocks. The new SHI module introduce the CSNRE bit which will be set when SHI_CS is
	 * deasserted regardless of SPI clocks.
	 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_CSNRE)) {
		/* Clear pending bit of CSNRE */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_CSNRE);

		/*
		 * We're not in proper state.
		 * Mark not ready to abort next transaction
		 */
		LOG_DBG("CSH-");
		shi_npcx_handle_cs_deassert(dev);
		return;
	}
#endif

	/*
	 * The number of bytes received reaches the size of
	 * protocol V3 header(=8) after CS asserted.
	 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_IBHF2)) {
		/* Clear IBHF2 */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_IBHF2);
		LOG_DBG("HDR-");

		/* Disable second IBF interrupt and start to parse header */
		shi_npcx_sec_ibf_int_enable(inst, 0);
		shi_npcx_parse_header(dev);
	}

	/*
	 * Indicate input/output buffer pointer reaches the half buffer size.
	 * Transaction is processing.
	 */
	if (IS_BIT_SET(stat, NPCX_EVSTAT_IBHF)) {
		shi_npcx_handle_input_buf_half_full(dev);
		return;
	}

	/*
	 * Indicate input/output buffer pointer reaches the full buffer size.
	 * Transaction is processing.
	 */
	if (IS_BIT_SET(stat, NPCX_EVSTAT_IBF)) {
		shi_npcx_handle_input_buf_full(dev);
		return;
	}
}
776
/* Reset the SHI module and driver state, then re-arm it for the next transaction. */
static void shi_npcx_reset_prepare(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;
	uint32_t i;

	data->state = SHI_STATE_DISABLED;

	irq_disable(DT_INST_IRQN(0));

	/* Disable SHI unit to clear all status bits */
	inst->SHICFG1 &= ~BIT(NPCX_SHICFG1_EN);

	/* Initialize parameters of next transaction */
	data->rx_msg = data->in_msg;
	data->tx_msg = data->out_msg;
	data->rx_buf = inst->IBUF;
	data->tx_buf = inst->OBUF;
	/* rx_ctx may not be attached yet (before backend init completes) */
	if (data->rx_ctx) {
		data->rx_ctx->len = 0;
	}
	data->sz_sending = 0;
	data->sz_request = 0;
	data->sz_response = 0;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/* In Enhanced Buffer Mode the module repeats SBOBUF on its own */
		inst->SBOBUF = EC_SHI_RX_READY;
		inst->SBOBUF = EC_SHI_RECEIVING;
		inst->EVENABLE |= IBF_IBHF_EN_MASK;
		inst->EVENABLE &= ~(BIT(NPCX_EVENABLE_OBEEN) | BIT(NPCX_EVENABLE_OBHEEN));
	} else {
		/*
		 * Fill output buffer to indicate we`re ready to receive next transaction.
		 */
		for (i = 1; i < SHI_OBUF_FULL_SIZE; i++) {
			inst->OBUF[i] = EC_SHI_RECEIVING;
		}
		inst->OBUF[0] = EC_SHI_RX_READY;
	}

	/* SHI/Host Write/input buffer wrap-around enable */
	inst->SHICFG1 = BIT(NPCX_SHICFG1_IWRAP) | BIT(NPCX_SHICFG1_WEN) | BIT(NPCX_SHICFG1_EN);

	data->state = SHI_STATE_READY_TO_RECV;
	data->last_error_state = SHI_STATE_NONE;

	shi_npcx_sec_ibf_int_enable(inst, 1);
	irq_enable(DT_INST_IRQN(0));

	/* Transaction is over; allow deep sleep again */
	shi_npcx_pm_policy_state_lock_put(data, SHI_NPCX_PM_POLICY_FLAG);

	LOG_DBG("RDY-");
}
830
/* Enable the SHI backend: clock on, module reset, pinmux to SHI, interrupts armed. */
static int shi_npcx_enable(const struct device *dev)
{
	const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	const struct shi_npcx_config *const config = dev->config;
	int ret;

	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on SHI clock fail %d", ret);
		return ret;
	}

	shi_npcx_reset_prepare(dev);
	/* Keep the CS wake-up input masked until the pinmux switch is done */
	npcx_miwu_irq_disable(&config->shi_cs_wui);

	/* Configure pin control for SHI */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("shi_npcx pinctrl setup failed (%d)", ret);
		return ret;
	}

	NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
	/*
	 * Clear the pending bit because switching the pinmux (pinctrl) might cause
	 * a spurious WUI pending bit to be set.
	 */
	npcx_miwu_irq_get_and_clear_pending(&config->shi_cs_wui);
	npcx_miwu_irq_enable(&config->shi_cs_wui);
	irq_enable(DT_INST_IRQN(0));

	return 0;
}
864
shi_npcx_disable(const struct device * dev)865 static int shi_npcx_disable(const struct device *dev)
866 {
867 const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
868 const struct shi_npcx_config *const config = dev->config;
869 struct shi_npcx_data *data = dev->data;
870 int ret;
871
872 data->state = SHI_STATE_DISABLED;
873
874 irq_disable(DT_INST_IRQN(0));
875 npcx_miwu_irq_disable(&config->shi_cs_wui);
876
877 /* Configure pin control back to GPIO */
878 ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
879 if (ret < 0) {
880 LOG_ERR("KB Raw pinctrl setup failed (%d)", ret);
881 return ret;
882 }
883
884 ret = clock_control_off(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
885 if (ret < 0) {
886 LOG_ERR("Turn off SHI clock fail %d", ret);
887 return ret;
888 }
889
890 /*
891 * Allow deep sleep again in case CS dropped before ec was
892 * informed in hook function and turn off SHI's interrupt in time.
893 */
894 shi_npcx_pm_policy_state_lock_put(data, SHI_NPCX_PM_POLICY_FLAG);
895
896 return 0;
897 }
898
899 #ifdef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
shi_npcx_cs_wui_isr(const struct device * dev,struct npcx_wui * wui)900 static void shi_npcx_cs_wui_isr(const struct device *dev, struct npcx_wui *wui)
901 {
902
903 struct shi_reg *const inst = HAL_INSTANCE(dev);
904 struct shi_npcx_data *data = dev->data;
905
906 if (IS_BIT_SET(inst->SHICFG2, NPCX_SHICFG2_BUSY)) {
907 data->is_entered_cs_asserted_wui_isr = true;
908 shi_npcx_handle_cs_assert(dev);
909 } else {
910 shi_npcx_handle_cs_deassert(dev);
911 data->is_entered_cs_asserted_wui_isr = false;
912 }
913 }
914 #endif
915
/*
 * One-time hardware bring-up for the SHI peripheral: enable its clock,
 * program the configuration/event-enable registers, hook up the CS
 * wake-up input, connect the core interrupt, and enable the block.
 *
 * Returns 0 on success or a negative errno from clock_control_on().
 * NOTE(review): the register writes below are order-sensitive (SHI must
 * stay disabled until the end) — do not reorder.
 */
static int shi_npcx_init_registers(const struct device *dev)
{
	int ret;
	const struct shi_npcx_config *const config = dev->config;
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);

	/* Turn on shi device clock first */
	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on SHI clock fail %d", ret);
		return ret;
	}

	/*
	 * SHICFG1 (SHI Configuration 1) setting
	 * [7] - IWRAP = 1: Wrap input buffer to the first address
	 * [6] - CPOL = 0: Sampling on rising edge and output on falling edge
	 * [5] - DAS = 0: return STATUS reg data after Status command
	 * [4] - AUTOBE = 0: Automatically update the OBES bit in STATUS reg
	 * [3] - AUTIBF = 0: Automatically update the IBFS bit in STATUS reg
	 * [2] - WEN = 0: Enable host write to input buffer
	 * [1] - Reserved 0
	 * [0] - ENABLE = 0: Disable SHI at the beginning
	 */
	inst->SHICFG1 = BIT(NPCX_SHICFG1_IWRAP);

	/*
	 * SHICFG2 (SHI Configuration 2) setting
	 * [7] - Reserved 0
	 * [6] - REEVEN = 0: Restart events are not used
	 * [5] - Reserved 0
	 * [4] - REEN = 0: Restart transactions are not used
	 * [3] - SLWU = 0: Seem-less wake-up is enabled by default
	 * [2] - ONESHOT= 0: WEN is cleared at the end of a write transaction
	 * [1] - BUSY = 0: SHI bus is busy 0: idle.
	 * [0] - SIMUL = 1: Turn on simultaneous Read/Write
	 */
	inst->SHICFG2 = BIT(NPCX_SHICFG2_SIMUL);

	/*
	 * EVENABLE (Event Enable) setting
	 * [7] - IBOREN = 0: Input buffer overrun interrupt enable
	 * [6] - STSREN = 0: status read interrupt disable
	 * [5] - EOWEN = 0: End-of-Data for Write Transaction Interrupt Enable
	 * [4] - EOREN = 1: End-of-Data for Read Transaction Interrupt Enable
	 * [3] - IBHFEN = 1: Input Buffer Half Full Interrupt Enable
	 * [2] - IBFEN = 1: Input Buffer Full Interrupt Enable
	 * [1] - OBHEEN = 0: Output Buffer Half Empty Interrupt Enable
	 * [0] - OBEEN = 0: Output Buffer Empty Interrupt Enable
	 */
	inst->EVENABLE = IBF_IBHF_EN_MASK;

	/*
	 * EVENABLE2 (Event Enable 2) setting
	 * [2] - CSNFEEN = 1: SHI_CS Falling Edge Interrupt Enable
	 * [1] - CSNREEN = 1: SHI_CS Rising Edge Interrupt Enable
	 * [0] - IBHF2EN = 0: Input Buffer Half Full 2 Interrupt Enable
	 */
#ifndef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
	/* CS edges come from the SHI core itself when the workaround is off. */
	inst->EVENABLE2 = BIT(NPCX_EVENABLE2_CSNREEN) | BIT(NPCX_EVENABLE2_CSNFEEN);
#endif

	/* Clear SHI events status register */
	inst->EVSTAT = 0xff;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		inst->SHICFG6 |= BIT(NPCX_SHICFG6_EBUFMD);
	}

#ifdef CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_CS_DETECT_WORKAROUND
	struct shi_npcx_data *data = dev->data;

	/* Workaround: detect CS edges via the MIWU pad instead of the SHI core. */
	npcx_miwu_interrupt_configure(&config->shi_cs_wui, NPCX_MIWU_MODE_EDGE,
				      NPCX_MIWU_TRIG_BOTH);

	npcx_miwu_init_dev_callback(&data->shi_cs_wui_cb, &config->shi_cs_wui, shi_npcx_cs_wui_isr,
				    dev);
	npcx_miwu_manage_callback(&data->shi_cs_wui_cb, true);
#else
	/* Normal path: only the falling (assert) edge wakes the EC. */
	npcx_miwu_interrupt_configure(&config->shi_cs_wui, NPCX_MIWU_MODE_EDGE, NPCX_MIWU_TRIG_LOW);
#endif

	/* SHI interrupt installation */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), shi_npcx_isr, DEVICE_DT_INST_GET(0),
		    0);

	shi_npcx_enable(dev);

	/* ret is 0 here: the only failing call above returns early. */
	return ret;
}
1007
/*
 * Device init hook: program the hardware, then hand control to runtime PM
 * with the device recorded as initially suspended.
 */
static int shi_npcx_init(const struct device *dev)
{
	int err = shi_npcx_init_registers(dev);

	if (err != 0) {
		return err;
	}

	/* Start suspended; runtime PM resumes the device on first use. */
	pm_device_init_suspended(dev);

	return pm_device_runtime_enable(dev);
}
1020
shi_npcx_backend_init(const struct ec_host_cmd_backend * backend,struct ec_host_cmd_rx_ctx * rx_ctx,struct ec_host_cmd_tx_buf * tx)1021 static int shi_npcx_backend_init(const struct ec_host_cmd_backend *backend,
1022 struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
1023 {
1024 struct ec_host_cmd_shi_npcx_ctx *hc_shi = (struct ec_host_cmd_shi_npcx_ctx *)backend->ctx;
1025 struct shi_npcx_data *data;
1026
1027 hc_shi->dev = DEVICE_DT_INST_GET(0);
1028 if (!device_is_ready(hc_shi->dev)) {
1029 return -ENODEV;
1030 }
1031
1032 data = hc_shi->dev->data;
1033 data->rx_ctx = rx_ctx;
1034 data->tx = tx;
1035
1036 rx_ctx->buf = data->in_msg;
1037 rx_ctx->len_max = CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_REQUEST;
1038 tx->buf = data->out_msg_padded + SHI_OUT_START_PAD;
1039 tx->len_max = CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_RESPONSE;
1040
1041 return 0;
1042 }
1043
/*
 * Backend-API send: queue the prepared host-command response for the next
 * SPI read phase. Runs in thread context; in non-enhanced buffer mode it
 * briefly masks all interrupts because the first reply byte must be staged
 * without preemption and the state machine must not change underneath us.
 * Always returns 0 — failures are reported through the SHI state machine.
 */
static int shi_npcx_backend_send(const struct ec_host_cmd_backend *backend)
{
	struct ec_host_cmd_shi_npcx_ctx *hc_shi = (struct ec_host_cmd_shi_npcx_ctx *)backend->ctx;
	struct shi_npcx_data *data = hc_shi->dev->data;
	/* Response body lives just past the frame-start byte in out_msg. */
	uint8_t *out_buf = data->out_msg + EC_SHI_FRAME_START_LENGTH;

	if (!IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/*
		 * Disable interrupts. This routine is not called from interrupt context and buffer
		 * underrun will likely occur if it is preempted after writing its initial reply
		 * byte. Also, we must be sure our state doesn't unexpectedly change, in case we're
		 * expected to take RESP_NOT_RDY actions.
		 */
		__disable_irq();
	}

	if (data->state == SHI_STATE_PROCESSING) {
		/* Append our past-end byte, which we reserved space for. */
		((uint8_t *)out_buf)[data->tx->len] = EC_SHI_PAST_END;

		/* Computing sending bytes of response */
		data->sz_response = data->tx->len + EC_SHI_PROTO3_OVERHEAD;

		/* Start to fill output buffer with msg buffer */
		shi_npcx_write_first_pkg_outbuf(hc_shi->dev, data->sz_response);

		/* Transmit the reply */
		data->state = SHI_STATE_SENDING;
		if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
			struct shi_reg *const inst = HAL_INSTANCE(hc_shi->dev);

			/*
			 * Enable output buffer half/full empty interrupt and
			 * switch output mode from the repeated single byte mode
			 * to FIFO mode.
			 */
			inst->EVENABLE |= BIT(NPCX_EVENABLE_OBEEN) | BIT(NPCX_EVENABLE_OBHEEN);
			inst->SHICFG6 |= BIT(NPCX_SHICFG6_OBUF_SL);
		}
		LOG_DBG("SND-");
	} else if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		/*
		 * If we're not processing, then the AP has already terminated
		 * the transaction, and won't be listening for a response.
		 * Reset state machine for next transaction.
		 */
		shi_npcx_reset_prepare(hc_shi->dev);
		LOG_DBG("END\n");
	} else {
		LOG_ERR("Unexpected state %d in response handler", data->state);
	}

	if (!IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		__enable_irq();
	}

	return 0;
}
1102
/* Backend vtable registered with the EC host-command core. */
static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
	.init = shi_npcx_backend_init,
	.send = shi_npcx_backend_send,
};
1107
1108 #ifdef CONFIG_PM_DEVICE
shi_npcx_pm_cb(const struct device * dev,enum pm_device_action action)1109 static int shi_npcx_pm_cb(const struct device *dev, enum pm_device_action action)
1110 {
1111 int ret = 0;
1112
1113 switch (action) {
1114 case PM_DEVICE_ACTION_SUSPEND:
1115 shi_npcx_disable(dev);
1116 break;
1117 case PM_DEVICE_ACTION_RESUME:
1118 shi_npcx_enable(dev);
1119 break;
1120 default:
1121 ret = -ENOTSUP;
1122 break;
1123 }
1124
1125 return ret;
1126 }
1127 #endif
1128
/* Assume only one peripheral */
PM_DEVICE_DT_INST_DEFINE(0, shi_npcx_pm_cb);

PINCTRL_DT_INST_DEFINE(0);
/* Static device configuration pulled from the devicetree instance. */
static const struct shi_npcx_config shi_cfg = {
	.base = DT_INST_REG_ADDR(0),
	.clk_cfg = NPCX_DT_CLK_CFG_ITEM(0),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.irq = DT_INST_IRQN(0),
	.shi_cs_wui = NPCX_DT_WUI_ITEM_BY_NAME(0, shi_cs_wui),
};

/* Mutable driver state; starts disabled with no recorded error. */
static struct shi_npcx_data shi_data = {
	.state = SHI_STATE_DISABLED,
	.last_error_state = SHI_STATE_NONE,
	/*
	 * out_msg points inside out_msg_padded so the EC_SHI_FRAME_START_LENGTH
	 * framing byte(s) sit immediately before the response payload at
	 * out_msg_padded + SHI_OUT_START_PAD (the tx->buf handed to the core).
	 */
	.out_msg = shi_data.out_msg_padded + SHI_OUT_START_PAD - EC_SHI_FRAME_START_LENGTH,
};

DEVICE_DT_INST_DEFINE(0, shi_npcx_init, PM_DEVICE_DT_INST_GET(0), &shi_data, &shi_cfg, POST_KERNEL,
		      CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ec_host_cmd_api);

/* Instantiate the backend object returned by the getter below. */
EC_HOST_CMD_SHI_NPCX_DEFINE(ec_host_cmd_shi_npcx);
1151
ec_host_cmd_backend_get_shi_npcx(void)1152 struct ec_host_cmd_backend *ec_host_cmd_backend_get_shi_npcx(void)
1153 {
1154 return &ec_host_cmd_shi_npcx;
1155 }
1156
#if DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_shi_backend)) &&                                      \
	defined(CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT)
/*
 * Boot-time hook: start the host-command subsystem on this backend when it
 * is the chosen SHI backend and boot-time init is enabled.
 */
static int host_cmd_init(void)
{
	/* NOTE(review): ec_host_cmd_init's return value is ignored here. */
	ec_host_cmd_init(ec_host_cmd_backend_get_shi_npcx());
	return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);
#endif
1166