/*
 * Copyright (c) 2022 Google LLC
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "ec_host_cmd_backend_shi.h"

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/logging/log.h>
#include <zephyr/mgmt/ec_host_cmd/backend.h>
#include <zephyr/mgmt/ec_host_cmd/ec_host_cmd.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/policy.h>

#include <soc_miwu.h>

#if DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi)
#define DT_DRV_COMPAT nuvoton_npcx_shi
#elif DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi_enhanced)
#define DT_DRV_COMPAT nuvoton_npcx_shi_enhanced
#endif
BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1, "Invalid number of NPCX SHI peripherals");
BUILD_ASSERT(!(DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi) &&
	       DT_HAS_COMPAT_STATUS_OKAY(nuvoton_npcx_shi_enhanced)),
	     "Cannot enable both nuvoton,npcx-shi and nuvoton,npcx-shi-enhanced");

LOG_MODULE_REGISTER(host_cmd_shi_npcx, CONFIG_EC_HC_LOG_LEVEL);

/* Driver convenience defines */
#define HAL_INSTANCE(dev) (struct shi_reg *)(((const struct shi_npcx_config *)(dev)->config)->base)

/* Full output buffer size */
#define SHI_OBUF_FULL_SIZE DT_INST_PROP(0, buffer_tx_size)
/* Full input buffer size */
#define SHI_IBUF_FULL_SIZE DT_INST_PROP(0, buffer_rx_size)
/* IBUFLVL2 threshold: the size of the protocol v3 request header (8 bytes) */
#define SHI_IBUFLVL2_THRESHOLD (sizeof(struct ec_host_cmd_request_header))
/* Half output buffer size */
#define SHI_OBUF_HALF_SIZE (SHI_OBUF_FULL_SIZE / 2)
/* Half input buffer size */
#define SHI_IBUF_HALF_SIZE (SHI_IBUF_FULL_SIZE / 2)

/*
 * Timeout to wait for a SHI request packet
 *
 * This affects the slowest SPI clock we can support. A delay of 8192 us permits a 512-byte request
 * at 500 kHz, assuming the SPI controller starts sending bytes as soon as it asserts chip select.
 * That's as slow as we would practically want to run the SHI interface, since running it slower
 * significantly impacts firmware update times.
 */
#define EC_SHI_CMD_RX_TIMEOUT_US 8192
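/* Worked example: a 512-byte request is 4096 bits; 4096 bits / 500 kHz = 8192 us. */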

/*
 * The AP blindly clocks back bytes over the SPI interface looking for a framing byte.
 * So this preamble must always precede the actual response packet.
 */
#define EC_SHI_OUT_PREAMBLE_LENGTH 2

/*
 * Space allocation of the past-end status byte (EC_SHI_PAST_END) in the out_msg buffer.
 */
#define EC_SHI_PAST_END_LENGTH 1

/*
 * Space allocation of the frame status byte (EC_SHI_FRAME_START) in the out_msg buffer.
 */
#define EC_SHI_FRAME_START_LENGTH 1

/*
 * Offset of output parameters needs to account for pad and framing bytes and
 * one last past-end byte at the end so any additional bytes clocked out by
 * the AP will have a known and identifiable value.
 */
#define EC_SHI_PROTO3_OVERHEAD (EC_SHI_PAST_END_LENGTH + EC_SHI_FRAME_START_LENGTH)

/*
 * Our input and output msg buffers. These must be large enough for our largest
 * message, including protocol overhead. The pointers after the protocol
 * overhead, as passed to the host command handler, must be 32-bit aligned.
 */
#define SHI_OUT_START_PAD (4 * (EC_SHI_FRAME_START_LENGTH / 4 + 1))
#define SHI_OUT_END_PAD (4 * (EC_SHI_PAST_END_LENGTH / 4 + 1))
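/*
 * Resulting layout of out_msg_padded (EC_SHI_FRAME_START_LENGTH is 1, so
 * SHI_OUT_START_PAD evaluates to 4):
 *
 *   out_msg_padded: [pad][pad][pad][FRAME_START][response ...][PAST_END][pad...]
 *                                  ^out_msg     ^tx->buf (32-bit aligned)
 */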
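/*
 * Typical transaction flow:
 *
 *   READY_TO_RECV -> RECEIVING -> PROCESSING -> SENDING -> READY_TO_RECV
 *
 * Invalid input falls into BAD_RECEIVED_DATA, and a CS deassertion while
 * still processing falls into CNL_RESP_NOT_RDY; both paths re-arm the bus
 * via shi_npcx_reset_prepare().
 */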
enum shi_npcx_state {
	SHI_STATE_NONE = -1,
	/* SHI not enabled (initial state, and when chipset is off) */
	SHI_STATE_DISABLED = 0,
	/* Ready to receive next request */
	SHI_STATE_READY_TO_RECV,
	/* Receiving request */
	SHI_STATE_RECEIVING,
	/* Processing request */
	SHI_STATE_PROCESSING,
	/* Canceling response since CS deasserted and output NOT_READY byte */
	SHI_STATE_CNL_RESP_NOT_RDY,
	/* Sending response */
	SHI_STATE_SENDING,
	/* Received data is invalid */
	SHI_STATE_BAD_RECEIVED_DATA,
};

enum shi_npcx_pm_policy_state_flag {
	SHI_NPCX_PM_POLICY_FLAG,
	SHI_NPCX_PM_POLICY_FLAG_COUNT,
};

/* Device config */
struct shi_npcx_config {
	/* Serial Host Interface (SHI) base address */
	uintptr_t base;
	/* Clock configuration */
	struct npcx_clk_cfg clk_cfg;
	/* Pin control configuration */
	const struct pinctrl_dev_config *pcfg;
	/* Chip-select interrupts */
	int irq;
	struct npcx_wui shi_cs_wui;
};

struct shi_npcx_data {
	struct ec_host_cmd_rx_ctx *rx_ctx;
	struct ec_host_cmd_tx_buf *tx;
	/* Communication status */
	enum shi_npcx_state state;
	enum shi_npcx_state last_error_state;
	uint8_t *rx_msg;          /* Entry pointer of msg rx buffer */
	uint8_t *tx_msg;          /* Entry pointer of msg tx buffer */
	volatile uint8_t *rx_buf; /* Entry pointer of receive buffer */
	volatile uint8_t *tx_buf; /* Entry pointer of transmit buffer */
	uint16_t sz_sending;      /* Size of the data sent so far, in bytes */
	uint16_t sz_request;      /* Request bytes to receive */
	uint16_t sz_response;     /* Response bytes to send */
	uint64_t rx_deadline;     /* Deadline for receiving, in system cycles */
	/* Buffers */
	uint8_t out_msg_padded[SHI_OUT_START_PAD + CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_RESPONSE +
			       SHI_OUT_END_PAD] __aligned(4);
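	/* Points at the frame-start byte inside out_msg_padded; see the layout note above */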
	uint8_t *const out_msg;
	uint8_t in_msg[CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_REQUEST] __aligned(4);
	ATOMIC_DEFINE(pm_policy_state_flag, SHI_NPCX_PM_POLICY_FLAG_COUNT);
};

struct ec_host_cmd_shi_npcx_ctx {
	/* SHI device instance */
	const struct device *dev;
};

#define EC_HOST_CMD_SHI_NPCX_DEFINE(_name)                                      \
	static struct ec_host_cmd_shi_npcx_ctx _name##_hc_shi_npcx;             \
	struct ec_host_cmd_backend _name = {                                    \
		.api = &ec_host_cmd_api,                                        \
		.ctx = (struct ec_host_cmd_shi_npcx_ctx *)&_name##_hc_shi_npcx, \
	}

/* Forward declaration */
static void shi_npcx_reset_prepare(const struct device *dev);

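/*
 * The PM policy lock below keeps the EC out of suspend-to-idle while a SHI
 * transaction is in flight: it is taken when CS is asserted and released once
 * the bus is re-armed in shi_npcx_reset_prepare() (or when the backend is
 * disabled), so the buffer interrupts are serviced without wake-up latency.
 */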
static void shi_npcx_pm_policy_state_lock_get(struct shi_npcx_data *data,
					      enum shi_npcx_pm_policy_state_flag flag)
{
	if (atomic_test_and_set_bit(data->pm_policy_state_flag, flag) == 0) {
		pm_policy_state_lock_get(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

static void shi_npcx_pm_policy_state_lock_put(struct shi_npcx_data *data,
					      enum shi_npcx_pm_policy_state_flag flag)
{
	if (atomic_test_and_clear_bit(data->pm_policy_state_flag, flag) == 1) {
		pm_policy_state_lock_put(PM_STATE_SUSPEND_TO_IDLE, PM_ALL_SUBSTATES);
	}
}

/* Read the pointer of the input or output buffer by consecutive reads */
static uint32_t shi_npcx_read_buf_pointer(struct shi_reg *const inst)
{
	uint8_t stat;

	/* Wait until two consecutive equal values are read */
	do {
		stat = inst->IBUFSTAT;
	} while (stat != inst->IBUFSTAT);

	return (uint32_t)stat;
}

/*
 * Read the write pointer of the output buffer by consecutive reads.
 * Note: this function (OBUFSTAT) should only be used in Enhanced Buffer Mode.
 */
static uint32_t shi_npcx_write_buf_pointer(struct shi_reg *const inst)
{
	uint8_t stat;

	/* Wait until two consecutive equal values are read */
	do {
		stat = inst->OBUFSTAT;
	} while (stat != inst->OBUFSTAT);

	return stat;
}

/*
 * Valid offset of the SHI output buffer to write to.
 * - In Simultaneous Standard FIFO Mode (SIMUL = 1 and EBUFMD = 0):
 *   OBUFPTR cannot be used. IBUFPTR can be used instead because it points to
 *   the same location as OBUFPTR.
 * - In Simultaneous Enhanced FIFO Mode (SIMUL = 1 and EBUFMD = 1):
 *   IBUFPTR may not point to the same location as OBUFPTR.
 *   In this case OBUFPTR reflects the 128-byte payload buffer pointer only
 *   during the SPI transaction.
 */
static uint32_t shi_npcx_valid_obuf_offset(struct shi_reg *const inst)
{
	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		return shi_npcx_write_buf_pointer(inst) % SHI_OBUF_FULL_SIZE;
	} else {
		return (shi_npcx_read_buf_pointer(inst) + EC_SHI_OUT_PREAMBLE_LENGTH) %
		       SHI_OBUF_FULL_SIZE;
	}
}

/*
 * This routine writes the next half of the SHI output buffer from the msg buffer.
 */
static void shi_npcx_write_half_outbuf(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;

	const uint32_t size = MIN(SHI_OBUF_HALF_SIZE, data->sz_response - data->sz_sending);
	uint8_t *obuf_ptr = (uint8_t *)data->tx_buf;
	const uint8_t *obuf_end = obuf_ptr + size;
	uint8_t *msg_ptr = data->tx_msg;

	/* Fill half output buffer */
	while (obuf_ptr != obuf_end) {
		*obuf_ptr++ = *msg_ptr++;
	}

	data->sz_sending += size;
	data->tx_buf = obuf_ptr;
	data->tx_msg = msg_ptr;
}

/*
 * This routine reads the SHI input buffer into the msg buffer until the
 * requested number of bytes has been received.
 * Returns 1 on success, or 0 on timeout.
 */
static int shi_npcx_read_inbuf_wait(const struct device *dev, uint32_t szbytes)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);

	/* Copy data to msg buffer from input buffer */
	for (uint32_t i = 0; i < szbytes; i++, data->rx_ctx->len++) {
		/*
		 * If the input buffer pointer equals the pointer we want to
		 * read from, the data is not ready yet.
		 */
		while (data->rx_buf == inst->IBUF + shi_npcx_read_buf_pointer(inst)) {
			if (k_cycle_get_64() >= data->rx_deadline) {
				return 0;
			}
		}

		/* Copy data to msg buffer */
		*data->rx_msg++ = *data->rx_buf++;
	}
	return 1;
}

/* This routine fills the entire SHI output buffer with a status byte */
static void shi_npcx_fill_out_status(struct shi_reg *const inst, uint8_t status)
{
	uint8_t start, end;
	volatile uint8_t *fill_ptr;
	volatile uint8_t *fill_end;
	volatile uint8_t *obuf_end;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/*
		 * In Enhanced Buffer Mode, the SHI module outputs the status
		 * code in SBOBUF repeatedly.
		 */
		inst->SBOBUF = status;

		return;
	}

	/*
	 * Disable interrupts to avoid interference from other interrupts.
	 * Use __disable_irq/__enable_irq instead of irq_lock/irq_unlock
	 * here because irq_lock/irq_unlock leave some system exceptions (like
	 * SVC, NMI, and faults) still enabled.
	 */
	__disable_irq();

	/*
	 * Fill the output buffer with the status byte, leaving a gap for the
	 * PREAMBLE. The gap guarantees synchronization; as long as the
	 * critical section completes within the gap, no race occurs.
	 */
	start = shi_npcx_valid_obuf_offset(inst);
	end = (start + SHI_OBUF_FULL_SIZE - EC_SHI_OUT_PREAMBLE_LENGTH) % SHI_OBUF_FULL_SIZE;

	fill_ptr = inst->OBUF + start;
	fill_end = inst->OBUF + end;
	obuf_end = inst->OBUF + SHI_OBUF_FULL_SIZE;
	while (fill_ptr != fill_end) {
		*fill_ptr++ = status;
		if (fill_ptr == obuf_end) {
			fill_ptr = inst->OBUF;
		}
	}

	/* End of critical section */
	__enable_irq();
}

/* This routine handles unexpected data received over SHI */
static void shi_npcx_bad_received_data(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		inst->EVENABLE &= ~IBF_IBHF_EN_MASK;
	}

	/* State machine mismatch, timeout, or protocol we can't handle. */
	shi_npcx_fill_out_status(inst, EC_SHI_RX_BAD_DATA);
	data->state = SHI_STATE_BAD_RECEIVED_DATA;

	LOG_ERR("SHI bad data recv");
	LOG_DBG("BAD-");
	LOG_HEXDUMP_DBG(data->in_msg, data->rx_ctx->len, "in_msg=");

	/* Reset SHI's state machine for error recovery */
	shi_npcx_reset_prepare(dev);

	LOG_DBG("END");
}

/*
 * This routine writes the SHI output buffer from the msg buffer, filling over
 * half of it. This makes sure we have enough time to handle the next
 * operations.
 */
static void shi_npcx_write_first_pkg_outbuf(const struct device *dev, uint16_t szbytes)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	uint8_t size, offset;
	volatile uint8_t *obuf_ptr;
	volatile uint8_t *obuf_end;
	uint8_t *msg_ptr;
	uint32_t half_buf_remain; /* Remaining bytes in the half buffer that are free to write */

	/* Start writing at our current OBUF position */
	offset = shi_npcx_valid_obuf_offset(inst);
	obuf_ptr = inst->OBUF + offset;
	msg_ptr = data->tx_msg;

	/* Fill up to OBUF mid point, or OBUF end */
	half_buf_remain = SHI_OBUF_HALF_SIZE - (offset % SHI_OBUF_HALF_SIZE);
	size = MIN(half_buf_remain, szbytes - data->sz_sending);
	obuf_end = obuf_ptr + size;
	while (obuf_ptr != obuf_end) {
		*obuf_ptr++ = *msg_ptr++;
	}

	/* Track bytes sent for later accounting */
	data->sz_sending += size;

	/* Write data to beginning of OBUF if we've reached the end */
	if (obuf_ptr == inst->OBUF + SHI_OBUF_FULL_SIZE) {
		obuf_ptr = inst->OBUF;
	}

	/* Fill next half output buffer */
	size = MIN(SHI_OBUF_HALF_SIZE, szbytes - data->sz_sending);
	obuf_end = obuf_ptr + size;
	while (obuf_ptr != obuf_end) {
		*obuf_ptr++ = *msg_ptr++;
	}

	/* Track bytes sent / last OBUF position written for later accounting */
	data->sz_sending += size;
	data->tx_buf = obuf_ptr;
	data->tx_msg = msg_ptr;
}

static void shi_npcx_handle_host_package(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	uint32_t sz_inbuf_int = data->sz_request / SHI_IBUF_HALF_SIZE;
	uint32_t cnt_inbuf_int = data->rx_ctx->len / SHI_IBUF_HALF_SIZE;

	if (sz_inbuf_int != cnt_inbuf_int) {
		/* More half-buffer chunks are expected; wait for the next buffer interrupt */
		return;
	}

	uint32_t remain_bytes = data->sz_request - data->rx_ctx->len;

	/* Read remaining bytes from input buffer */
	if (!shi_npcx_read_inbuf_wait(dev, remain_bytes)) {
		return shi_npcx_bad_received_data(dev);
	}

	/* Move to processing state */
	data->state = SHI_STATE_PROCESSING;
	LOG_DBG("PRC-");

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		inst->EVENABLE &= ~IBF_IBHF_EN_MASK;
	}

	/* Fill output buffer to indicate we're processing the request */
	shi_npcx_fill_out_status(inst, EC_SHI_PROCESSING);
	data->out_msg[0] = EC_SHI_FRAME_START;

	/* Wake up the HC handler thread */
	ec_host_cmd_rx_notify();
}

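/* Returns the expected size of the host request in bytes, or 0 if the header is invalid */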
static int shi_npcx_host_request_expected_size(const struct ec_host_cmd_request_header *r)
{
	/* Check host request version */
	if (r->prtcl_ver != EC_HOST_REQUEST_VERSION) {
		return 0;
	}

	/* Reserved byte should be 0 */
	if (r->reserved) {
		return 0;
	}

	return sizeof(*r) + r->data_len;
}

static void shi_npcx_parse_header(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;

	/* We're now inside a transaction */
	data->state = SHI_STATE_RECEIVING;
	LOG_DBG("RV-");

	/* Set up the deadline for receiving */
	data->rx_deadline = k_cycle_get_64() + k_us_to_cyc_near64(EC_SHI_CMD_RX_TIMEOUT_US);

	/* Wait for version, command, length bytes */
	if (!shi_npcx_read_inbuf_wait(dev, 3)) {
		return shi_npcx_bad_received_data(dev);
	}

	if (data->in_msg[0] == EC_HOST_REQUEST_VERSION) {
		/* Protocol version 3 */
		struct ec_host_cmd_request_header *r =
			(struct ec_host_cmd_request_header *)data->in_msg;
		int pkt_size;

		/*
		 * If the header ever grows past half of the input buffer,
		 * this algorithm needs to be reworked.
		 */
		__ASSERT_NO_MSG(sizeof(*r) < SHI_IBUF_HALF_SIZE);

		/* Wait for the rest of the command header */
		if (!shi_npcx_read_inbuf_wait(dev, sizeof(*r) - 3)) {
			return shi_npcx_bad_received_data(dev);
		}

		/* Check how big the packet should be */
		pkt_size = shi_npcx_host_request_expected_size(r);
		if (pkt_size == 0 || pkt_size > sizeof(data->in_msg)) {
			return shi_npcx_bad_received_data(dev);
		}

		/* Compute the total bytes that need to be received */
		data->sz_request = pkt_size;

		shi_npcx_handle_host_package(dev);
	} else {
		/* Invalid version number */
		return shi_npcx_bad_received_data(dev);
	}
}

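/*
 * Enable/disable the IBUFLVL2 interrupt, which fires once the input buffer
 * holds a full protocol v3 request header (SHI_IBUFLVL2_THRESHOLD bytes).
 */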
static void shi_npcx_sec_ibf_int_enable(struct shi_reg *const inst, int enable)
{
	if (enable) {
		/* Setup IBUFLVL2 threshold and enable it */
		inst->SHICFG5 |= BIT(NPCX_SHICFG5_IBUFLVL2DIS);
		SET_FIELD(inst->SHICFG5, NPCX_SHICFG5_IBUFLVL2, SHI_IBUFLVL2_THRESHOLD);
		inst->SHICFG5 &= ~BIT(NPCX_SHICFG5_IBUFLVL2DIS);

		/* Enable IBHF2 event */
		inst->EVENABLE2 |= BIT(NPCX_EVENABLE2_IBHF2EN);
	} else {
		/* Disable IBHF2 event first */
		inst->EVENABLE2 &= ~BIT(NPCX_EVENABLE2_IBHF2EN);

		/* Disable IBUFLVL2 and set the threshold back to zero */
		inst->SHICFG5 |= BIT(NPCX_SHICFG5_IBUFLVL2DIS);
		SET_FIELD(inst->SHICFG5, NPCX_SHICFG5_IBUFLVL2, 0);
	}
}

/* This routine copies SHI half input buffer data to the msg buffer */
static void shi_npcx_read_half_inbuf(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;

	/*
	 * Copy to the msg buffer until reaching the middle/top address of
	 * the input buffer or until all data has been received.
	 */
	do {
		/* Restore data to msg buffer */
		*data->rx_msg++ = *data->rx_buf++;
		data->rx_ctx->len++;
	} while (data->rx_ctx->len % SHI_IBUF_HALF_SIZE && data->rx_ctx->len != data->sz_request);
}

/*
 * Avoid spamming the console with prints every IBF / IBHF interrupt, if
 * we find ourselves in an unexpected state.
 */
static void shi_npcx_log_unexpected_state(const struct device *dev, char *isr_name)
{
	struct shi_npcx_data *data = dev->data;

	if (data->state != data->last_error_state) {
		LOG_ERR("Unexpected state %d in %s ISR", data->state, isr_name);
	}

	data->last_error_state = data->state;
}

static void shi_npcx_handle_cs_assert(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;

	/* If not enabled, ignore glitches on SHI_CS_L */
	if (data->state == SHI_STATE_DISABLED) {
		return;
	}

	/* The NOT_READY byte is being sent and there is no SPI transaction now. */
	if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		return;
	}

	/* Chip select is low = asserted */
	if (data->state != SHI_STATE_READY_TO_RECV) {
		/* State machine should be reset in EVSTAT_EOR ISR */
		LOG_ERR("Unexpected state %d in CS ISR", data->state);
		return;
	}

	LOG_DBG("CSL-");

	/*
	 * Clear the possible EOR event from the previous transaction since
	 * it's irrelevant now that CS is re-asserted.
	 */
	inst->EVSTAT = BIT(NPCX_EVSTAT_EOR);

	shi_npcx_pm_policy_state_lock_get(data, SHI_NPCX_PM_POLICY_FLAG);
}

static void shi_npcx_handle_cs_deassert(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;

	/*
	 * If the buffer is still used by the host command,
	 * change the state machine for the response handler.
	 */
	if (data->state == SHI_STATE_PROCESSING) {
		/*
		 * Mark as not ready to prevent another transaction from
		 * starting immediately
		 */
		shi_npcx_fill_out_status(inst, EC_SHI_NOT_READY);

		data->state = SHI_STATE_CNL_RESP_NOT_RDY;

		/*
		 * Disable SHI interrupt; it will remain disabled until shi_send_response_packet()
		 * is called and CS is asserted for a new transaction.
		 */
		irq_disable(DT_INST_IRQN(0));

		LOG_DBG("CNL-");
		return;
		/* Next transaction but we're not ready */
	} else if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		return;
	}

	/* Any state other than SENDING here is unexpected */
	if (data->state != SHI_STATE_SENDING) {
		shi_npcx_log_unexpected_state(dev, "CSNRE");
	}

	/* Reset SHI and prepare for the next transaction */
	shi_npcx_reset_prepare(dev);
	LOG_DBG("END\n");
}

static void shi_npcx_handle_input_buf_half_full(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;

	if (data->state == SHI_STATE_RECEIVING) {
		/* Read data from input to msg buffer */
		shi_npcx_read_half_inbuf(dev);
		return shi_npcx_handle_host_package(dev);
	} else if (data->state == SHI_STATE_SENDING) {
		/* Write data from msg buffer to output buffer */
		if (data->tx_buf == inst->OBUF + SHI_OBUF_FULL_SIZE) {
			/* Write data from the bottom address again */
			data->tx_buf = inst->OBUF;
			shi_npcx_write_half_outbuf(dev);
		}
	} else if (data->state == SHI_STATE_PROCESSING) {
		/* Wait for host to handle request */
	} else {
		/* Unexpected status */
		shi_npcx_log_unexpected_state(dev, "IBHF");
	}
}

static void shi_npcx_handle_input_buf_full(const struct device *dev)
{
	struct shi_npcx_data *data = dev->data;
	struct shi_reg *const inst = HAL_INSTANCE(dev);

	if (data->state == SHI_STATE_RECEIVING) {
		/* Read data from input to msg buffer */
		shi_npcx_read_half_inbuf(dev);
		/* Read from the bottom address again */
		data->rx_buf = inst->IBUF;
		return shi_npcx_handle_host_package(dev);
	} else if (data->state == SHI_STATE_SENDING) {
		/* Write data from msg buffer to output buffer */
		if (data->tx_buf == inst->OBUF + SHI_OBUF_HALF_SIZE) {
			shi_npcx_write_half_outbuf(dev);
		}

		return;
	} else if (data->state == SHI_STATE_PROCESSING) {
		/* Wait for host to handle request */
		return;
	}

	/* Unexpected status */
	shi_npcx_log_unexpected_state(dev, "IBF");
}

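/*
 * Top-level SHI interrupt handler: dispatches CS assertion (CSNFE), CS
 * deassertion (CSNRE), header-received (IBHF2), and input buffer half-full
 * (IBHF) / full (IBF) events.
 */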
static void shi_npcx_isr(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	uint8_t stat;
	uint8_t stat2;

	/* Read status register and clear interrupt status early */
	stat = inst->EVSTAT;
	inst->EVSTAT = stat;
	stat2 = inst->EVSTAT2;

	/* SHI CS pin asserted, reported in EVSTAT2 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_CSNFE)) {
		/* Clear pending bit of CSNFE */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_CSNFE);
		LOG_DBG("CSNFE-");

		/*
		 * The BUSY bit is set when SHI_CS is asserted. If not, leave it
		 * for the SHI_CS deasserted event.
		 */
		if (!IS_BIT_SET(inst->SHICFG2, NPCX_SHICFG2_BUSY)) {
			LOG_DBG("CSNB-");
			return;
		}

		shi_npcx_handle_cs_assert(dev);
	}

	/*
	 * End of data for a read/write transaction, i.e. SHI_CS is deasserted.
	 * The host completed or aborted the transaction.
	 *
	 * EOR has the limitation that it is not set if SHI_CS is deasserted without
	 * SPI clocks. The new SHI module introduces the CSNRE bit, which is set when
	 * SHI_CS is deasserted regardless of SPI clocks.
	 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_CSNRE)) {
		/* Clear pending bit of CSNRE */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_CSNRE);

		/*
		 * If we're not in a proper state, mark not ready to abort the
		 * next transaction.
		 */
		LOG_DBG("CSH-");
		return shi_npcx_handle_cs_deassert(dev);
	}

	/*
	 * The number of bytes received has reached the size of the
	 * protocol v3 header (8 bytes) after CS was asserted.
	 */
	if (IS_BIT_SET(stat2, NPCX_EVSTAT2_IBHF2)) {
		/* Clear IBHF2 */
		inst->EVSTAT2 = BIT(NPCX_EVSTAT2_IBHF2);
		LOG_DBG("HDR-");

		/* Disable the second IBF interrupt and start parsing the header */
		shi_npcx_sec_ibf_int_enable(inst, 0);
		shi_npcx_parse_header(dev);
	}

	/*
	 * The input/output buffer pointer has reached the half-buffer mark.
	 * A transaction is in progress.
	 */
	if (IS_BIT_SET(stat, NPCX_EVSTAT_IBHF)) {
		return shi_npcx_handle_input_buf_half_full(dev);
	}

	/*
	 * The input/output buffer pointer has reached the full-buffer mark.
	 * A transaction is in progress.
	 */
	if (IS_BIT_SET(stat, NPCX_EVSTAT_IBF)) {
		return shi_npcx_handle_input_buf_full(dev);
	}
}

static void shi_npcx_reset_prepare(const struct device *dev)
{
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	struct shi_npcx_data *data = dev->data;
	uint32_t i;

	data->state = SHI_STATE_DISABLED;

	irq_disable(DT_INST_IRQN(0));

	/* Disable SHI unit to clear all status bits */
	inst->SHICFG1 &= ~BIT(NPCX_SHICFG1_EN);

	/* Initialize parameters of next transaction */
	data->rx_msg = data->in_msg;
	data->tx_msg = data->out_msg;
	data->rx_buf = inst->IBUF;
	data->tx_buf = inst->OBUF;
	if (data->rx_ctx) {
		data->rx_ctx->len = 0;
	}
	data->sz_sending = 0;
	data->sz_request = 0;
	data->sz_response = 0;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		inst->SBOBUF = EC_SHI_RX_READY;
		inst->SBOBUF = EC_SHI_RECEIVING;
		inst->EVENABLE |= IBF_IBHF_EN_MASK;
		inst->EVENABLE &= ~(BIT(NPCX_EVENABLE_OBEEN) | BIT(NPCX_EVENABLE_OBHEEN));
	} else {
		/*
		 * Fill the output buffer to indicate we're ready to receive the next transaction.
		 */
		for (i = 1; i < SHI_OBUF_FULL_SIZE; i++) {
			inst->OBUF[i] = EC_SHI_RECEIVING;
		}
		inst->OBUF[0] = EC_SHI_RX_READY;
	}

	/* Enable the SHI unit, host write, and input buffer wrap-around */
	inst->SHICFG1 = BIT(NPCX_SHICFG1_IWRAP) | BIT(NPCX_SHICFG1_WEN) | BIT(NPCX_SHICFG1_EN);

	data->state = SHI_STATE_READY_TO_RECV;
	data->last_error_state = SHI_STATE_NONE;

	shi_npcx_sec_ibf_int_enable(inst, 1);
	irq_enable(DT_INST_IRQN(0));

	shi_npcx_pm_policy_state_lock_put(data, SHI_NPCX_PM_POLICY_FLAG);

	LOG_DBG("RDY-");
}

static int shi_npcx_enable(const struct device *dev)
{
	const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	const struct shi_npcx_config *const config = dev->config;
	int ret;

	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on SHI clock fail %d", ret);
		return ret;
	}

	shi_npcx_reset_prepare(dev);
	npcx_miwu_irq_disable(&config->shi_cs_wui);

	/* Configure pin control for SHI */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		LOG_ERR("shi_npcx pinctrl setup failed (%d)", ret);
		return ret;
	}

	NVIC_ClearPendingIRQ(DT_INST_IRQN(0));
	npcx_miwu_irq_enable(&config->shi_cs_wui);
	irq_enable(DT_INST_IRQN(0));

	return 0;
}

static int shi_npcx_disable(const struct device *dev)
{
	const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);
	const struct shi_npcx_config *const config = dev->config;
	struct shi_npcx_data *data = dev->data;
	int ret;

	data->state = SHI_STATE_DISABLED;

	irq_disable(DT_INST_IRQN(0));
	npcx_miwu_irq_disable(&config->shi_cs_wui);

	/* Configure pin control back to GPIO */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_SLEEP);
	if (ret < 0) {
		LOG_ERR("shi_npcx pinctrl setup failed (%d)", ret);
		return ret;
	}

	ret = clock_control_off(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn off SHI clock fail %d", ret);
		return ret;
	}

	/*
	 * Allow deep sleep again in case CS dropped before the EC was
	 * informed in the hook function and turned off SHI's interrupt in time.
	 */
	shi_npcx_pm_policy_state_lock_put(data, SHI_NPCX_PM_POLICY_FLAG);

	return 0;
}

static int shi_npcx_init_registers(const struct device *dev)
{
	int ret;
	const struct shi_npcx_config *const config = dev->config;
	struct shi_reg *const inst = HAL_INSTANCE(dev);
	const struct device *clk_dev = DEVICE_DT_GET(NPCX_CLK_CTRL_NODE);

	/* Turn on the SHI device clock first */
	ret = clock_control_on(clk_dev, (clock_control_subsys_t)&config->clk_cfg);
	if (ret < 0) {
		LOG_ERR("Turn on SHI clock fail %d", ret);
		return ret;
	}

	/* If the booter doesn't set the host interface type */
	if (!NPCX_BOOTER_IS_HIF_TYPE_SET()) {
		npcx_host_interface_sel(NPCX_HIF_TYPE_ESPI_SHI);
	}

	/*
	 * SHICFG1 (SHI Configuration 1) setting
	 * [7] - IWRAP  = 1: Wrap input buffer to the first address
	 * [6] - CPOL   = 0: Sampling on rising edge and output on falling edge
	 * [5] - DAS    = 0: Return STATUS reg data after Status command
	 * [4] - AUTOBE = 0: Automatically update the OBES bit in STATUS reg
	 * [3] - AUTIBF = 0: Automatically update the IBFS bit in STATUS reg
	 * [2] - WEN    = 0: Enable host write to input buffer
	 * [1] - Reserved 0
	 * [0] - ENABLE = 0: Disable SHI at the beginning
	 */
	inst->SHICFG1 = BIT(NPCX_SHICFG1_IWRAP);

	/*
	 * SHICFG2 (SHI Configuration 2) setting
	 * [7] - Reserved 0
	 * [6] - REEVEN  = 0: Restart events are not used
	 * [5] - Reserved 0
	 * [4] - REEN    = 0: Restart transactions are not used
	 * [3] - SLWU    = 0: Seamless wake-up is enabled by default
	 * [2] - ONESHOT = 0: WEN is cleared at the end of a write transaction
	 * [1] - BUSY    = 0: SHI bus is idle (1: busy)
	 * [0] - SIMUL   = 1: Turn on simultaneous Read/Write
	 */
	inst->SHICFG2 = BIT(NPCX_SHICFG2_SIMUL);

	/*
	 * EVENABLE (Event Enable) setting
	 * [7] - IBOREN = 0: Input Buffer Overrun Interrupt Enable
	 * [6] - STSREN = 0: Status Read Interrupt Enable
	 * [5] - EOWEN  = 0: End-of-Data for Write Transaction Interrupt Enable
	 * [4] - EOREN  = 1: End-of-Data for Read Transaction Interrupt Enable
	 * [3] - IBHFEN = 1: Input Buffer Half Full Interrupt Enable
	 * [2] - IBFEN  = 1: Input Buffer Full Interrupt Enable
	 * [1] - OBHEEN = 0: Output Buffer Half Empty Interrupt Enable
	 * [0] - OBEEN  = 0: Output Buffer Empty Interrupt Enable
	 */
	inst->EVENABLE = BIT(NPCX_EVENABLE_EOREN) | IBF_IBHF_EN_MASK;

	/*
	 * EVENABLE2 (Event Enable 2) setting
	 * [2] - CSNFEEN = 1: SHI_CS Falling Edge Interrupt Enable
	 * [1] - CSNREEN = 1: SHI_CS Rising Edge Interrupt Enable
	 * [0] - IBHF2EN = 0: Input Buffer Half Full 2 Interrupt Enable
	 */
	inst->EVENABLE2 = BIT(NPCX_EVENABLE2_CSNREEN) | BIT(NPCX_EVENABLE2_CSNFEEN);

	/* Clear SHI events status register */
	inst->EVSTAT = 0xff;

	if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		inst->SHICFG6 |= BIT(NPCX_SHICFG6_EBUFMD);
	}

	npcx_miwu_interrupt_configure(&config->shi_cs_wui, NPCX_MIWU_MODE_EDGE, NPCX_MIWU_TRIG_LOW);

	/* SHI interrupt installation */
	IRQ_CONNECT(DT_INST_IRQN(0), DT_INST_IRQ(0, priority), shi_npcx_isr, DEVICE_DT_INST_GET(0),
		    0);

	return shi_npcx_enable(dev);
}

static int shi_npcx_init(const struct device *dev)
{
	int ret;

	ret = shi_npcx_init_registers(dev);
	if (ret) {
		return ret;
	}
	pm_device_init_suspended(dev);

	return pm_device_runtime_enable(dev);
}

static int shi_npcx_backend_init(const struct ec_host_cmd_backend *backend,
				 struct ec_host_cmd_rx_ctx *rx_ctx, struct ec_host_cmd_tx_buf *tx)
{
	struct ec_host_cmd_shi_npcx_ctx *hc_shi = (struct ec_host_cmd_shi_npcx_ctx *)backend->ctx;
	struct shi_npcx_data *data;

	hc_shi->dev = DEVICE_DT_INST_GET(0);
	if (!device_is_ready(hc_shi->dev)) {
		return -ENODEV;
	}

	data = hc_shi->dev->data;
	data->rx_ctx = rx_ctx;
	data->tx = tx;

	rx_ctx->buf = data->in_msg;
	rx_ctx->len_max = CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_REQUEST;
	tx->buf = data->out_msg_padded + SHI_OUT_START_PAD;
	tx->len_max = CONFIG_EC_HOST_CMD_BACKEND_SHI_MAX_RESPONSE;

	return 0;
}

static int shi_npcx_backend_send(const struct ec_host_cmd_backend *backend)
{
	struct ec_host_cmd_shi_npcx_ctx *hc_shi = (struct ec_host_cmd_shi_npcx_ctx *)backend->ctx;
	struct shi_npcx_data *data = hc_shi->dev->data;
	uint8_t *out_buf = data->out_msg + EC_SHI_FRAME_START_LENGTH;

	if (!IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		/*
		 * Disable interrupts. This routine is not called from interrupt context and buffer
		 * underrun will likely occur if it is preempted after writing its initial reply
		 * byte. Also, we must be sure our state doesn't unexpectedly change, in case we're
		 * expected to take RESP_NOT_RDY actions.
		 */
		__disable_irq();
	}

	if (data->state == SHI_STATE_PROCESSING) {
		/* Append our past-end byte, which we reserved space for. */
		out_buf[data->tx->len] = EC_SHI_PAST_END;

		/* Compute the number of bytes in the response */
		data->sz_response = data->tx->len + EC_SHI_PROTO3_OVERHEAD;

		/* Start to fill the output buffer from the msg buffer */
		shi_npcx_write_first_pkg_outbuf(hc_shi->dev, data->sz_response);

		/* Transmit the reply */
		data->state = SHI_STATE_SENDING;
		if (IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
			struct shi_reg *const inst = HAL_INSTANCE(hc_shi->dev);

			/*
			 * Enable the output buffer half/full empty interrupts and
			 * switch the output mode from the repeated single byte mode
			 * to FIFO mode.
			 */
			inst->EVENABLE |= BIT(NPCX_EVENABLE_OBEEN) | BIT(NPCX_EVENABLE_OBHEEN);
			inst->SHICFG6 |= BIT(NPCX_SHICFG6_OBUF_SL);
		}
		LOG_DBG("SND-");
	} else if (data->state == SHI_STATE_CNL_RESP_NOT_RDY) {
		/*
		 * If we're not processing, then the AP has already terminated
		 * the transaction, and won't be listening for a response.
		 * Reset the state machine for the next transaction.
		 */
		shi_npcx_reset_prepare(hc_shi->dev);
		LOG_DBG("END\n");
	} else {
		LOG_ERR("Unexpected state %d in response handler", data->state);
	}

	if (!IS_ENABLED(CONFIG_EC_HOST_CMD_BACKEND_SHI_NPCX_ENHANCED_BUF_MODE)) {
		__enable_irq();
	}

	return 0;
}

static const struct ec_host_cmd_backend_api ec_host_cmd_api = {
	.init = shi_npcx_backend_init,
	.send = shi_npcx_backend_send,
};

#ifdef CONFIG_PM_DEVICE
static int shi_npcx_pm_cb(const struct device *dev, enum pm_device_action action)
{
	int ret = 0;

	switch (action) {
	case PM_DEVICE_ACTION_SUSPEND:
		shi_npcx_disable(dev);
		break;
	case PM_DEVICE_ACTION_RESUME:
		shi_npcx_enable(dev);
		break;
	default:
		ret = -ENOTSUP;
		break;
	}

	return ret;
}
#endif

/* Assume only one peripheral */
PM_DEVICE_DT_INST_DEFINE(0, shi_npcx_pm_cb);

PINCTRL_DT_INST_DEFINE(0);
static const struct shi_npcx_config shi_cfg = {
	.base = DT_INST_REG_ADDR(0),
	.clk_cfg = NPCX_DT_CLK_CFG_ITEM(0),
	.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(0),
	.irq = DT_INST_IRQN(0),
	.shi_cs_wui = NPCX_DT_WUI_ITEM_BY_NAME(0, shi_cs_wui),
};

static struct shi_npcx_data shi_data = {
	.state = SHI_STATE_DISABLED,
	.last_error_state = SHI_STATE_NONE,
	.out_msg = shi_data.out_msg_padded + SHI_OUT_START_PAD - EC_SHI_FRAME_START_LENGTH,
};

DEVICE_DT_INST_DEFINE(0, shi_npcx_init, PM_DEVICE_DT_INST_GET(0), &shi_data, &shi_cfg, POST_KERNEL,
		      CONFIG_KERNEL_INIT_PRIORITY_DEFAULT, &ec_host_cmd_api);

EC_HOST_CMD_SHI_NPCX_DEFINE(ec_host_cmd_shi_npcx);

struct ec_host_cmd_backend *ec_host_cmd_backend_get_shi_npcx(void)
{
	return &ec_host_cmd_shi_npcx;
}

#if DT_NODE_EXISTS(DT_CHOSEN(zephyr_host_cmd_shi_backend)) && \
	defined(CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT)
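/*
 * A minimal sketch of how an application selects this backend at boot,
 * assuming a hypothetical &shi0 devicetree node for the SHI peripheral:
 *
 *   chosen {
 *       zephyr,host-cmd-shi-backend = &shi0;
 *   };
 *
 * With CONFIG_EC_HOST_CMD_INITIALIZE_AT_BOOT enabled, the init hook below
 * then registers this backend with the host command subsystem automatically.
 */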
static int host_cmd_init(void)
{
	ec_host_cmd_init(ec_host_cmd_backend_get_shi_npcx());
	return 0;
}
SYS_INIT(host_cmd_init, POST_KERNEL, CONFIG_EC_HOST_CMD_INIT_PRIORITY);
#endif