1 /*
2 * Copyright (c) 2024 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT snps_designware_ssi
8
9 #include <zephyr/drivers/mspi.h>
10 #include <zephyr/drivers/gpio.h>
11 #include <zephyr/drivers/pinctrl.h>
12 #include <zephyr/logging/log.h>
13 #include <zephyr/pm/device.h>
14 #include <zephyr/pm/device_runtime.h>
15 #include <zephyr/sys/byteorder.h>
16 #include <zephyr/sys/util.h>
17
18 #include "mspi_dw.h"
19 #include "mspi_dw_vendor_specific.h"
20
21 LOG_MODULE_REGISTER(mspi_dw, CONFIG_MSPI_LOG_LEVEL);
22
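/* Value written to the TX FIFO to generate clock cycles for the RX part
 * of standard SPI transfers (see make_rx_cycles()).
 */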
23 #define DUMMY_BYTE 0xAA
24
25 #if defined(CONFIG_MSPI_XIP)
26 struct xip_params {
27 uint32_t read_cmd;
28 uint32_t write_cmd;
29 uint16_t rx_dummy;
30 uint16_t tx_dummy;
31 uint8_t cmd_length;
32 uint8_t addr_length;
33 enum mspi_io_mode io_mode;
34 };
35
36 struct xip_ctrl {
37 uint32_t read;
38 uint32_t write;
39 };
40 #endif
41
42 struct mspi_dw_data {
43 const struct mspi_dev_id *dev_id;
44 uint32_t packets_done;
45 uint8_t *buf_pos;
46 const uint8_t *buf_end;
47
48 uint32_t ctrlr0;
49 uint32_t spi_ctrlr0;
50 uint32_t baudr;
51
52 #if defined(CONFIG_MSPI_XIP)
53 uint32_t xip_freq;
54 struct xip_params xip_params_stored;
55 struct xip_params xip_params_active;
56 uint16_t xip_enabled;
57 enum mspi_cpp_mode xip_cpp;
58 #endif
59
60 uint16_t dummy_bytes;
61 uint8_t bytes_to_discard;
62 uint8_t bytes_per_frame_exp;
63 bool standard_spi;
64 bool suspended;
65
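	/* Signaled from the ISR when the current transfer packet is done. */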
66 struct k_sem finished;
67 /* For synchronization of API calls made from different contexts. */
68 struct k_sem ctx_lock;
69 /* For locking of controller configuration. */
70 struct k_sem cfg_lock;
71 struct mspi_xfer xfer;
72 };
73
74 struct mspi_dw_config {
75 DEVICE_MMIO_ROM;
76 void (*irq_config)(void);
77 uint32_t clock_frequency;
78 #if defined(CONFIG_PINCTRL)
79 const struct pinctrl_dev_config *pcfg;
80 #endif
81 const struct gpio_dt_spec *ce_gpios;
82 uint8_t ce_gpios_len;
83 uint8_t tx_fifo_depth_minus_1;
84 uint8_t tx_fifo_threshold;
85 uint8_t rx_fifo_threshold;
86 DECLARE_REG_ACCESS();
87 bool sw_multi_periph;
88 };
89
90 /* Register access helpers. */
91 #define DEFINE_MM_REG_RD_WR(reg, off) \
92 DEFINE_MM_REG_RD(reg, off) \
93 DEFINE_MM_REG_WR(reg, off)
94
95 DEFINE_MM_REG_WR(ctrlr0, 0x00)
96 DEFINE_MM_REG_WR(ctrlr1, 0x04)
97 DEFINE_MM_REG_WR(ssienr, 0x08)
98 DEFINE_MM_REG_WR(ser, 0x10)
99 DEFINE_MM_REG_WR(baudr, 0x14)
100 DEFINE_MM_REG_RD_WR(txftlr, 0x18)
101 DEFINE_MM_REG_RD_WR(rxftlr, 0x1c)
102 DEFINE_MM_REG_RD(txflr, 0x20)
103 DEFINE_MM_REG_RD(rxflr, 0x24)
104 DEFINE_MM_REG_RD(sr, 0x28)
105 DEFINE_MM_REG_WR(imr, 0x2c)
106 DEFINE_MM_REG_RD(isr, 0x30)
107 DEFINE_MM_REG_RD_WR(dr, 0x60)
108 DEFINE_MM_REG_WR(spi_ctrlr0, 0xf4)
109
110 #if defined(CONFIG_MSPI_XIP)
111 DEFINE_MM_REG_WR(xip_incr_inst, 0x100)
112 DEFINE_MM_REG_WR(xip_wrap_inst, 0x104)
113 DEFINE_MM_REG_WR(xip_ctrl, 0x108)
114 DEFINE_MM_REG_WR(xip_write_incr_inst, 0x140)
115 DEFINE_MM_REG_WR(xip_write_wrap_inst, 0x144)
116 DEFINE_MM_REG_WR(xip_write_ctrl, 0x148)
117 #endif
118
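/* Fills the TX FIFO with frames taken from the current buffer position,
 * stopping when the FIFO is full or the buffer end is reached.
 */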
119 static void tx_data(const struct device *dev,
120 const struct mspi_xfer_packet *packet)
121 {
122 struct mspi_dw_data *dev_data = dev->data;
123 const struct mspi_dw_config *dev_config = dev->config;
124 const uint8_t *buf_pos = dev_data->buf_pos;
125 const uint8_t *buf_end = dev_data->buf_end;
126 /* When the function is called, it is known that at least one item
127 * can be written to the FIFO. The loop below writes to the FIFO
128 * the number of items that is known to fit and then updates that
129 	 * number based on the actual FIFO level (because some data may get
130 * sent while the FIFO is written; especially for high frequencies
131 * this may often occur) and continues until the FIFO is filled up
132 * or the buffer end is reached.
133 */
134 uint32_t room = 1;
135 uint8_t bytes_per_frame_exp = dev_data->bytes_per_frame_exp;
136 uint8_t tx_fifo_depth = dev_config->tx_fifo_depth_minus_1 + 1;
137 uint32_t data;
138
139 do {
140 if (bytes_per_frame_exp == 2) {
141 data = sys_get_be32(buf_pos);
142 buf_pos += 4;
143 } else if (bytes_per_frame_exp == 1) {
144 data = sys_get_be16(buf_pos);
145 buf_pos += 2;
146 } else {
147 data = *buf_pos;
148 buf_pos += 1;
149 }
150 write_dr(dev, data);
151
152 if (buf_pos >= buf_end) {
153 write_txftlr(dev, 0);
154 break;
155 }
156
157 if (--room == 0) {
158 room = tx_fifo_depth
159 - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
160 }
161 } while (room);
162
163 dev_data->buf_pos = (uint8_t *)buf_pos;
164 }
165
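/* Writes dummy bytes to the TX FIFO so that clock cycles are generated
 * for the RX part of a transfer in standard SPI mode. Returns true when
 * all required dummy bytes have been written.
 */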
166 static bool make_rx_cycles(const struct device *dev)
167 {
168 struct mspi_dw_data *dev_data = dev->data;
169 const struct mspi_dw_config *dev_config = dev->config;
170 uint16_t dummy_bytes = dev_data->dummy_bytes;
171 /* See tx_data(). */
172 uint32_t room = 1;
173 uint8_t tx_fifo_depth = dev_config->tx_fifo_depth_minus_1 + 1;
174
175 do {
176 write_dr(dev, DUMMY_BYTE);
177
178 --dummy_bytes;
179 if (!dummy_bytes) {
180 dev_data->dummy_bytes = 0;
181 return true;
182 }
183
184 if (--room == 0) {
185 room = tx_fifo_depth
186 - FIELD_GET(TXFLR_TXTFL_MASK, read_txflr(dev));
187 }
188 } while (room);
189
190 dev_data->dummy_bytes = dummy_bytes;
191 return false;
192 }
193
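/* Reads frames from the RX FIFO into the packet buffer, first discarding
 * those that correspond to the command and address fields sent as data
 * in standard SPI mode.
 */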
194 static void read_rx_fifo(const struct device *dev,
195 const struct mspi_xfer_packet *packet)
196 {
197 struct mspi_dw_data *dev_data = dev->data;
198 const struct mspi_dw_config *dev_config = dev->config;
199 uint8_t bytes_to_discard = dev_data->bytes_to_discard;
200 uint8_t *buf_pos = dev_data->buf_pos;
201 const uint8_t *buf_end = &packet->data_buf[packet->num_bytes];
202 uint8_t bytes_per_frame_exp = dev_data->bytes_per_frame_exp;
203 /* See `room` in tx_data(). */
204 uint32_t in_fifo = 1;
205 uint32_t remaining_frames;
206
207 do {
208 uint32_t data = read_dr(dev);
209
210 if (bytes_to_discard) {
211 --bytes_to_discard;
212 } else {
213 if (bytes_per_frame_exp == 2) {
214 sys_put_be32(data, buf_pos);
215 buf_pos += 4;
216 } else if (bytes_per_frame_exp == 1) {
217 sys_put_be16(data, buf_pos);
218 buf_pos += 2;
219 } else {
220 *buf_pos = (uint8_t)data;
221 buf_pos += 1;
222 }
223
224 if (buf_pos >= buf_end) {
225 dev_data->bytes_to_discard = bytes_to_discard;
226 dev_data->buf_pos = buf_pos;
227 return;
228 }
229 }
230
231 if (--in_fifo == 0) {
232 in_fifo = FIELD_GET(RXFLR_RXTFL_MASK, read_rxflr(dev));
233 }
234 } while (in_fifo);
235
236 remaining_frames = (bytes_to_discard + buf_end - buf_pos)
237 >> bytes_per_frame_exp;
238 if (remaining_frames - 1 < dev_config->rx_fifo_threshold) {
239 write_rxftlr(dev, remaining_frames - 1);
240 }
241
242 dev_data->bytes_to_discard = bytes_to_discard;
243 dev_data->buf_pos = buf_pos;
244 }
245
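/* Interrupt handler: drains the RX FIFO, refills the TX FIFO with data or
 * dummy bytes, and signals completion once the whole packet is transferred.
 */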
246 static void mspi_dw_isr(const struct device *dev)
247 {
248 struct mspi_dw_data *dev_data = dev->data;
249 const struct mspi_xfer_packet *packet =
250 &dev_data->xfer.packets[dev_data->packets_done];
251 uint32_t int_status = read_isr(dev);
252
253 if (int_status & ISR_RXFIS_BIT) {
254 read_rx_fifo(dev, packet);
255 }
256
257 if (dev_data->buf_pos >= dev_data->buf_end) {
258 write_imr(dev, 0);
259 /* It may happen that at this point the controller is still
260 * shifting out the last frame (the last interrupt occurs when
261 * the TX FIFO is empty). Wait if it signals that it is busy.
262 */
263 while (read_sr(dev) & SR_BUSY_BIT) {
264 }
265
266 k_sem_give(&dev_data->finished);
267 } else {
268 if (int_status & ISR_TXEIS_BIT) {
269 if (dev_data->dummy_bytes) {
270 if (make_rx_cycles(dev)) {
271 write_imr(dev, IMR_RXFIM_BIT);
272 }
273 } else {
274 tx_data(dev, packet);
275 }
276 }
277 }
278
279 vendor_specific_irq_clear(dev);
280 }
281
282 static int api_config(const struct mspi_dt_spec *spec)
283 {
284 ARG_UNUSED(spec);
285
286 return -ENOTSUP;
287 }
288
289 static bool apply_io_mode(struct mspi_dw_data *dev_data,
290 enum mspi_io_mode io_mode)
291 {
292 dev_data->ctrlr0 &= ~CTRLR0_SPI_FRF_MASK;
293 dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_TRANS_TYPE_MASK;
294
295 /* Frame format used for transferring data. */
296
297 if (io_mode == MSPI_IO_MODE_SINGLE) {
298 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK,
299 CTRLR0_SPI_FRF_STANDARD);
300 dev_data->standard_spi = true;
301 return true;
302 }
303
304 dev_data->standard_spi = false;
305
306 switch (io_mode) {
307 case MSPI_IO_MODE_DUAL:
308 case MSPI_IO_MODE_DUAL_1_1_2:
309 case MSPI_IO_MODE_DUAL_1_2_2:
310 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK,
311 CTRLR0_SPI_FRF_DUAL);
312 break;
313 case MSPI_IO_MODE_QUAD:
314 case MSPI_IO_MODE_QUAD_1_1_4:
315 case MSPI_IO_MODE_QUAD_1_4_4:
316 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK,
317 CTRLR0_SPI_FRF_QUAD);
318 break;
319 case MSPI_IO_MODE_OCTAL:
320 case MSPI_IO_MODE_OCTAL_1_1_8:
321 case MSPI_IO_MODE_OCTAL_1_8_8:
322 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SPI_FRF_MASK,
323 CTRLR0_SPI_FRF_OCTAL);
324 break;
325 default:
326 LOG_ERR("IO mode %d not supported", io_mode);
327 return false;
328 }
329
330 /* Transfer format used for Address and Instruction: */
331
332 switch (io_mode) {
333 case MSPI_IO_MODE_DUAL_1_1_2:
334 case MSPI_IO_MODE_QUAD_1_1_4:
335 case MSPI_IO_MODE_OCTAL_1_1_8:
336 /* - both sent in Standard SPI mode */
337 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK,
338 SPI_CTRLR0_TRANS_TYPE_TT0);
339 break;
340 case MSPI_IO_MODE_DUAL_1_2_2:
341 case MSPI_IO_MODE_QUAD_1_4_4:
342 case MSPI_IO_MODE_OCTAL_1_8_8:
343 /* - Instruction sent in Standard SPI mode,
344 * Address sent the same way as data
345 */
346 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK,
347 SPI_CTRLR0_TRANS_TYPE_TT1);
348 break;
349 default:
350 /* - both sent the same way as data. */
351 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_TRANS_TYPE_MASK,
352 SPI_CTRLR0_TRANS_TYPE_TT2);
353 break;
354 }
355
356 return true;
357 }
358
359 static bool apply_cmd_length(struct mspi_dw_data *dev_data, uint32_t cmd_length)
360 {
361 switch (cmd_length) {
362 case 0:
363 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK,
364 SPI_CTRLR0_INST_L0);
365 break;
366 case 1:
367 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK,
368 SPI_CTRLR0_INST_L8);
369 break;
370 case 2:
371 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_INST_L_MASK,
372 SPI_CTRLR0_INST_L16);
373 break;
374 default:
375 LOG_ERR("Command length %d not supported", cmd_length);
376 return false;
377 }
378
379 return true;
380 }
381
382 static bool apply_addr_length(struct mspi_dw_data *dev_data,
383 uint32_t addr_length)
384 {
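	/* The ADDR_L field is expressed in 4-bit increments, hence the
	 * address length in bytes is multiplied by 2.
	 */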
385 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_ADDR_L_MASK,
386 addr_length * 2);
387
388 return true;
389 }
390
391 #if defined(CONFIG_MSPI_XIP)
392 static bool apply_xip_io_mode(const struct mspi_dw_data *dev_data,
393 struct xip_ctrl *ctrl)
394 {
395 enum mspi_io_mode io_mode = dev_data->xip_params_active.io_mode;
396
397 /* Frame format used for transferring data. */
398
399 if (io_mode == MSPI_IO_MODE_SINGLE) {
400 LOG_ERR("XIP not available in single line mode");
401 return false;
402 }
403
404 switch (io_mode) {
405 case MSPI_IO_MODE_DUAL:
406 case MSPI_IO_MODE_DUAL_1_1_2:
407 case MSPI_IO_MODE_DUAL_1_2_2:
408 ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK,
409 XIP_CTRL_FRF_DUAL);
410 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK,
411 XIP_WRITE_CTRL_FRF_DUAL);
412 break;
413 case MSPI_IO_MODE_QUAD:
414 case MSPI_IO_MODE_QUAD_1_1_4:
415 case MSPI_IO_MODE_QUAD_1_4_4:
416 ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK,
417 XIP_CTRL_FRF_QUAD);
418 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK,
419 XIP_WRITE_CTRL_FRF_QUAD);
420 break;
421 case MSPI_IO_MODE_OCTAL:
422 case MSPI_IO_MODE_OCTAL_1_1_8:
423 case MSPI_IO_MODE_OCTAL_1_8_8:
424 ctrl->read |= FIELD_PREP(XIP_CTRL_FRF_MASK,
425 XIP_CTRL_FRF_OCTAL);
426 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_FRF_MASK,
427 XIP_WRITE_CTRL_FRF_OCTAL);
428 break;
429 default:
430 LOG_ERR("IO mode %d not supported", io_mode);
431 return false;
432 }
433
434 /* Transfer format used for Address and Instruction: */
435
436 switch (io_mode) {
437 case MSPI_IO_MODE_DUAL_1_1_2:
438 case MSPI_IO_MODE_QUAD_1_1_4:
439 case MSPI_IO_MODE_OCTAL_1_1_8:
440 /* - both sent in Standard SPI mode */
441 ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK,
442 XIP_CTRL_TRANS_TYPE_TT0);
443 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK,
444 XIP_WRITE_CTRL_TRANS_TYPE_TT0);
445 break;
446 case MSPI_IO_MODE_DUAL_1_2_2:
447 case MSPI_IO_MODE_QUAD_1_4_4:
448 case MSPI_IO_MODE_OCTAL_1_8_8:
449 /* - Instruction sent in Standard SPI mode,
450 * Address sent the same way as data
451 */
452 ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK,
453 XIP_CTRL_TRANS_TYPE_TT1);
454 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK,
455 XIP_WRITE_CTRL_TRANS_TYPE_TT1);
456 break;
457 default:
458 /* - both sent the same way as data. */
459 ctrl->read |= FIELD_PREP(XIP_CTRL_TRANS_TYPE_MASK,
460 XIP_CTRL_TRANS_TYPE_TT2);
461 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_TRANS_TYPE_MASK,
462 XIP_WRITE_CTRL_TRANS_TYPE_TT2);
463 break;
464 }
465
466 return true;
467 }
468
469 static bool apply_xip_cmd_length(const struct mspi_dw_data *dev_data,
470 struct xip_ctrl *ctrl)
471 {
472 uint8_t cmd_length = dev_data->xip_params_active.cmd_length;
473
474 switch (cmd_length) {
475 case 0:
476 ctrl->read |= FIELD_PREP(XIP_CTRL_INST_L_MASK,
477 XIP_CTRL_INST_L0);
478 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK,
479 XIP_WRITE_CTRL_INST_L0);
480 break;
481 case 1:
482 ctrl->read |= XIP_CTRL_INST_EN_BIT
483 | FIELD_PREP(XIP_CTRL_INST_L_MASK,
484 XIP_CTRL_INST_L8);
485 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK,
486 XIP_WRITE_CTRL_INST_L8);
487 break;
488 case 2:
489 ctrl->read |= XIP_CTRL_INST_EN_BIT
490 | FIELD_PREP(XIP_CTRL_INST_L_MASK,
491 XIP_CTRL_INST_L16);
492 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_INST_L_MASK,
493 XIP_WRITE_CTRL_INST_L16);
494 break;
495 default:
496 LOG_ERR("Command length %d not supported", cmd_length);
497 return false;
498 }
499
500 return true;
501 }
502
503 static bool apply_xip_addr_length(const struct mspi_dw_data *dev_data,
504 struct xip_ctrl *ctrl)
505 {
506 uint8_t addr_length = dev_data->xip_params_active.addr_length;
507
508 ctrl->read |= FIELD_PREP(XIP_CTRL_ADDR_L_MASK, addr_length * 2);
509 ctrl->write |= FIELD_PREP(XIP_WRITE_CTRL_ADDR_L_MASK, addr_length * 2);
510
511 return true;
512 }
513 #endif /* defined(CONFIG_MSPI_XIP) */
514
515 static int _api_dev_config(const struct device *dev,
516 const enum mspi_dev_cfg_mask param_mask,
517 const struct mspi_dev_cfg *cfg)
518 {
519 const struct mspi_dw_config *dev_config = dev->config;
520 struct mspi_dw_data *dev_data = dev->data;
521
522 if (param_mask & MSPI_DEVICE_CONFIG_ENDIAN) {
523 if (cfg->endian != MSPI_XFER_BIG_ENDIAN) {
524 LOG_ERR("Only big endian transfers are supported.");
525 return -ENOTSUP;
526 }
527 }
528
529 if (param_mask & MSPI_DEVICE_CONFIG_CE_POL) {
530 if (cfg->ce_polarity != MSPI_CE_ACTIVE_LOW) {
531 LOG_ERR("Only active low CE is supported.");
532 return -ENOTSUP;
533 }
534 }
535
536 if (param_mask & MSPI_DEVICE_CONFIG_MEM_BOUND) {
537 if (cfg->mem_boundary) {
538 LOG_ERR("Auto CE break is not supported.");
539 return -ENOTSUP;
540 }
541 }
542
543 if (param_mask & MSPI_DEVICE_CONFIG_BREAK_TIME) {
544 if (cfg->time_to_break) {
545 LOG_ERR("Auto CE break is not supported.");
546 return -ENOTSUP;
547 }
548 }
549
550 if (param_mask & MSPI_DEVICE_CONFIG_IO_MODE) {
551 #if defined(CONFIG_MSPI_XIP)
552 dev_data->xip_params_stored.io_mode = cfg->io_mode;
553 #endif
554
555 if (!apply_io_mode(dev_data, cfg->io_mode)) {
556 return -EINVAL;
557 }
558 }
559
560 if (param_mask & MSPI_DEVICE_CONFIG_CPP) {
561 #if defined(CONFIG_MSPI_XIP)
562 /* Make sure the new setting is compatible with the one used
563 * for XIP if it is enabled.
564 */
565 if (!dev_data->xip_enabled) {
566 dev_data->xip_cpp = cfg->cpp;
567 } else if (dev_data->xip_cpp != cfg->cpp) {
568 LOG_ERR("Conflict with configuration used for XIP.");
569 return -EINVAL;
570 }
571 #endif
572
573 dev_data->ctrlr0 &= ~(CTRLR0_SCPOL_BIT | CTRLR0_SCPH_BIT);
574
575 switch (cfg->cpp) {
576 default:
577 case MSPI_CPP_MODE_0:
578 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 0) |
579 FIELD_PREP(CTRLR0_SCPH_BIT, 0);
580 break;
581 case MSPI_CPP_MODE_1:
582 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 0) |
583 FIELD_PREP(CTRLR0_SCPH_BIT, 1);
584 break;
585 case MSPI_CPP_MODE_2:
586 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 1) |
587 FIELD_PREP(CTRLR0_SCPH_BIT, 0);
588 break;
589 case MSPI_CPP_MODE_3:
590 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_SCPOL_BIT, 1) |
591 FIELD_PREP(CTRLR0_SCPH_BIT, 1);
592 break;
593 }
594 }
595
596 if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) {
597 if (cfg->freq > dev_config->clock_frequency / 2 ||
598 cfg->freq < dev_config->clock_frequency / 65534) {
599 LOG_ERR("Invalid frequency: %u, MIN: %u, MAX: %u",
600 cfg->freq, dev_config->clock_frequency / 65534,
601 dev_config->clock_frequency / 2);
602 return -EINVAL;
603 }
604
605 #if defined(CONFIG_MSPI_XIP)
606 /* Make sure the new setting is compatible with the one used
607 * for XIP if it is enabled.
608 */
609 if (!dev_data->xip_enabled) {
610 dev_data->xip_freq = cfg->freq;
611 } else if (dev_data->xip_freq != cfg->freq) {
612 LOG_ERR("Conflict with configuration used for XIP.");
613 return -EINVAL;
614 }
615 #endif
616
617 dev_data->baudr = dev_config->clock_frequency / cfg->freq;
618 }
619
620 if (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE) {
621 /* TODO: add support for DDR */
622 if (cfg->data_rate != MSPI_DATA_RATE_SINGLE) {
623 LOG_ERR("Only single data rate is supported.");
624 return -ENOTSUP;
625 }
626 }
627
628 if (param_mask & MSPI_DEVICE_CONFIG_DQS) {
629 /* TODO: add support for DQS */
630 if (cfg->dqs_enable) {
631 LOG_ERR("DQS line is not supported.");
632 return -ENOTSUP;
633 }
634 }
635
636 #if defined(CONFIG_MSPI_XIP)
637 if (param_mask & MSPI_DEVICE_CONFIG_READ_CMD) {
638 dev_data->xip_params_stored.read_cmd = cfg->read_cmd;
639 }
640 if (param_mask & MSPI_DEVICE_CONFIG_WRITE_CMD) {
641 dev_data->xip_params_stored.write_cmd = cfg->write_cmd;
642 }
643 if (param_mask & MSPI_DEVICE_CONFIG_RX_DUMMY) {
644 dev_data->xip_params_stored.rx_dummy = cfg->rx_dummy;
645 }
646 if (param_mask & MSPI_DEVICE_CONFIG_TX_DUMMY) {
647 dev_data->xip_params_stored.tx_dummy = cfg->tx_dummy;
648 }
649 if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) {
650 dev_data->xip_params_stored.cmd_length = cfg->cmd_length;
651 }
652 if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) {
653 dev_data->xip_params_stored.addr_length = cfg->addr_length;
654 }
655 #endif
656
657 /* Always use Motorola SPI frame format. */
658 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_FRF_MASK, CTRLR0_FRF_SPI);
659 /* Enable clock stretching. */
660 dev_data->spi_ctrlr0 |= SPI_CTRLR0_CLK_STRETCH_EN_BIT;
661
662 return 0;
663 }
664
665 static int api_dev_config(const struct device *dev,
666 const struct mspi_dev_id *dev_id,
667 const enum mspi_dev_cfg_mask param_mask,
668 const struct mspi_dev_cfg *cfg)
669 {
670 const struct mspi_dw_config *dev_config = dev->config;
671 struct mspi_dw_data *dev_data = dev->data;
672 int rc;
673
674 if (dev_id != dev_data->dev_id) {
675 rc = k_sem_take(&dev_data->cfg_lock,
676 K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE));
677 if (rc < 0) {
678 LOG_ERR("Failed to switch controller to device");
679 return -EBUSY;
680 }
681
682 dev_data->dev_id = dev_id;
683 }
684
685 if (param_mask == MSPI_DEVICE_CONFIG_NONE &&
686 !dev_config->sw_multi_periph) {
687 return 0;
688 }
689
690 (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
691
692 rc = _api_dev_config(dev, param_mask, cfg);
693
694 k_sem_give(&dev_data->ctx_lock);
695
696 if (rc < 0) {
697 dev_data->dev_id = NULL;
698 k_sem_give(&dev_data->cfg_lock);
699 }
700
701 return rc;
702 }
703
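/* Releases the controller from the device that currently owns it, so that
 * another device can claim it in api_dev_config().
 */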
704 static int api_get_channel_status(const struct device *dev, uint8_t ch)
705 {
706 ARG_UNUSED(ch);
707
708 struct mspi_dw_data *dev_data = dev->data;
709
710 (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
711
712 dev_data->dev_id = NULL;
713 k_sem_give(&dev_data->cfg_lock);
714
715 k_sem_give(&dev_data->ctx_lock);
716
717 return 0;
718 }
719
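/* Writes a command or address field to the FIFO byte by byte, most
 * significant byte first (used only in standard SPI mode).
 */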
720 static void tx_control_field(const struct device *dev,
721 uint32_t field, uint8_t len)
722 {
723 uint8_t shift = 8 * len;
724
725 do {
726 shift -= 8;
727 write_dr(dev, field >> shift);
728 } while (shift);
729 }
730
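/* Configures the controller for the packet at the current index, starts
 * the transfer, and blocks until it finishes or the timeout expires.
 */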
731 static int start_next_packet(const struct device *dev, k_timeout_t timeout)
732 {
733 const struct mspi_dw_config *dev_config = dev->config;
734 struct mspi_dw_data *dev_data = dev->data;
735 const struct mspi_xfer_packet *packet =
736 &dev_data->xfer.packets[dev_data->packets_done];
737 bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP,
738 (dev_data->xip_enabled != 0),
739 (false));
740 unsigned int key;
741 uint8_t tx_fifo_threshold;
742 uint32_t packet_frames;
743 uint32_t imr;
744 int rc = 0;
745
746 if (packet->num_bytes == 0 &&
747 dev_data->xfer.cmd_length == 0 &&
748 dev_data->xfer.addr_length == 0) {
749 return 0;
750 }
751
752 dev_data->dummy_bytes = 0;
753
754 dev_data->ctrlr0 &= ~CTRLR0_TMOD_MASK
755 & ~CTRLR0_DFS_MASK;
756
757 dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_WAIT_CYCLES_MASK;
758
759 if (dev_data->standard_spi &&
760 (dev_data->xfer.cmd_length != 0 ||
761 dev_data->xfer.addr_length != 0)) {
762 dev_data->bytes_per_frame_exp = 0;
763 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 7);
764 } else {
765 if ((packet->num_bytes % 4) == 0) {
766 dev_data->bytes_per_frame_exp = 2;
767 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 31);
768 } else if ((packet->num_bytes % 2) == 0) {
769 dev_data->bytes_per_frame_exp = 1;
770 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 15);
771 } else {
772 dev_data->bytes_per_frame_exp = 0;
773 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_DFS_MASK, 7);
774 }
775 }
776
777 packet_frames = packet->num_bytes >> dev_data->bytes_per_frame_exp;
778
779 if (packet_frames > UINT16_MAX + 1) {
780 LOG_ERR("Packet length (%u) exceeds supported maximum",
781 packet->num_bytes);
782 return -EINVAL;
783 }
784
785 if (packet->dir == MSPI_TX || packet->num_bytes == 0) {
786 imr = IMR_TXEIM_BIT;
787 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK,
788 CTRLR0_TMOD_TX);
789 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
790 dev_data->xfer.tx_dummy);
791
792 write_rxftlr(dev, 0);
793 tx_fifo_threshold = dev_config->tx_fifo_threshold;
794 } else {
795 uint32_t tmod;
796 uint8_t rx_fifo_threshold;
797
798 /* In Standard SPI Mode, the controller does not support
799 		 * sending the command and address fields separately; they
800 * need to be sent as data; hence, for RX packets with these
801 * fields, the TX/RX transfer mode needs to be used and
802 * consequently, dummy bytes need to be transmitted so that
803 * clock cycles for the RX part are provided (the controller
804 * does not do it automatically in the TX/RX mode).
805 */
806 if (dev_data->standard_spi &&
807 (dev_data->xfer.cmd_length != 0 ||
808 dev_data->xfer.addr_length != 0)) {
809 uint32_t rx_total_bytes;
810
811 dev_data->bytes_to_discard = dev_data->xfer.cmd_length
812 + dev_data->xfer.addr_length;
813 rx_total_bytes = dev_data->bytes_to_discard
814 + packet->num_bytes;
815
816 dev_data->dummy_bytes = packet->num_bytes;
817
818 imr = IMR_TXEIM_BIT | IMR_RXFIM_BIT;
819 tmod = CTRLR0_TMOD_TX_RX;
820 tx_fifo_threshold = dev_config->tx_fifo_threshold;
821 /* For standard SPI, only 1-byte frames are used. */
822 rx_fifo_threshold = MIN(rx_total_bytes - 1,
823 dev_config->rx_fifo_threshold);
824 } else {
825 imr = IMR_RXFIM_BIT;
826 tmod = CTRLR0_TMOD_RX;
827 tx_fifo_threshold = 0;
828 rx_fifo_threshold = MIN(packet_frames - 1,
829 dev_config->rx_fifo_threshold);
830 }
831
832 dev_data->ctrlr0 |= FIELD_PREP(CTRLR0_TMOD_MASK, tmod);
833 dev_data->spi_ctrlr0 |= FIELD_PREP(SPI_CTRLR0_WAIT_CYCLES_MASK,
834 dev_data->xfer.rx_dummy);
835
836 write_rxftlr(dev, FIELD_PREP(RXFTLR_RFT_MASK,
837 rx_fifo_threshold));
838 }
839
840 if (dev_data->dev_id->ce.port) {
841 rc = gpio_pin_set_dt(&dev_data->dev_id->ce, 1);
842 if (rc < 0) {
843 LOG_ERR("Failed to activate CE line (%d)", rc);
844 return rc;
845 }
846 }
847
848 if (xip_enabled) {
849 key = irq_lock();
850 write_ssienr(dev, 0);
851 }
852
853 	/* These registers cannot be written when the controller is enabled,
854 	 * which is why it is temporarily disabled above, with interrupts
855 	 * locked to prevent potential XIP transfers during that period.
856 */
857 write_ctrlr0(dev, dev_data->ctrlr0);
858 write_ctrlr1(dev, packet_frames > 0
859 ? FIELD_PREP(CTRLR1_NDF_MASK, packet_frames - 1)
860 : 0);
861 write_spi_ctrlr0(dev, dev_data->spi_ctrlr0);
862 write_baudr(dev, dev_data->baudr);
863 write_ser(dev, BIT(dev_data->dev_id->dev_idx));
864
865 if (xip_enabled) {
866 write_ssienr(dev, SSIENR_SSIC_EN_BIT);
867 irq_unlock(key);
868 }
869
870 dev_data->buf_pos = packet->data_buf;
871 dev_data->buf_end = &packet->data_buf[packet->num_bytes];
872
873 if ((imr & IMR_TXEIM_BIT) && dev_data->buf_pos < dev_data->buf_end) {
874 uint32_t start_level = tx_fifo_threshold;
875
876 if (dev_data->dummy_bytes) {
877 uint32_t tx_total = dev_data->bytes_to_discard
878 + dev_data->dummy_bytes;
879
880 if (start_level > tx_total - 1) {
881 start_level = tx_total - 1;
882 }
883 }
884
885 write_txftlr(dev,
886 FIELD_PREP(TXFTLR_TXFTHR_MASK, start_level) |
887 FIELD_PREP(TXFTLR_TFT_MASK, tx_fifo_threshold));
888 } else {
889 write_txftlr(dev, 0);
890 }
891
892 /* Ensure that there will be no interrupt from the controller yet. */
893 write_imr(dev, 0);
894 /* Enable the controller. This must be done before DR is written. */
895 write_ssienr(dev, SSIENR_SSIC_EN_BIT);
896
897 if (dev_data->standard_spi) {
898 if (dev_data->xfer.cmd_length) {
899 tx_control_field(dev, packet->cmd,
900 dev_data->xfer.cmd_length);
901 }
902
903 if (dev_data->xfer.addr_length) {
904 tx_control_field(dev, packet->address,
905 dev_data->xfer.addr_length);
906 }
907 } else {
908 if (dev_data->xfer.cmd_length) {
909 write_dr(dev, packet->cmd);
910 }
911
912 if (dev_data->xfer.addr_length) {
913 write_dr(dev, packet->address);
914 }
915 }
916
917 if (dev_data->dummy_bytes) {
918 if (make_rx_cycles(dev)) {
919 imr = IMR_RXFIM_BIT;
920 }
921 } else if (packet->dir == MSPI_TX && packet->num_bytes) {
922 tx_data(dev, packet);
923 }
924
925 /* Enable interrupts now and wait until the packet is done. */
926 write_imr(dev, imr);
927
928 rc = k_sem_take(&dev_data->finished, timeout);
929 if (rc < 0) {
930 rc = -ETIMEDOUT;
931 }
932
933 /* Disable the controller. This will immediately halt the transfer
934 * if it hasn't finished yet.
935 */
936 if (xip_enabled) {
937 /* If XIP is enabled, the controller must be kept enabled,
938 * so disable it only momentarily if there's a need to halt
939 		 * a transfer that has timed out.
940 */
941 if (rc == -ETIMEDOUT) {
942 key = irq_lock();
943
944 write_ssienr(dev, 0);
945 write_ssienr(dev, SSIENR_SSIC_EN_BIT);
946
947 irq_unlock(key);
948 }
949 } else {
950 write_ssienr(dev, 0);
951 }
952
953 if (dev_data->dev_id->ce.port) {
954 int rc2;
955
956 		/* Use `rc2` so as not to overwrite a potential timeout error in `rc`. */
957 rc2 = gpio_pin_set_dt(&dev_data->dev_id->ce, 0);
958 if (rc2 < 0) {
959 LOG_ERR("Failed to deactivate CE line (%d)", rc2);
960 return rc2;
961 }
962 }
963
964 return rc;
965 }
966
967 static int _api_transceive(const struct device *dev,
968 const struct mspi_xfer *req)
969 {
970 struct mspi_dw_data *dev_data = dev->data;
971 int rc;
972
973 dev_data->spi_ctrlr0 &= ~SPI_CTRLR0_WAIT_CYCLES_MASK
974 & ~SPI_CTRLR0_INST_L_MASK
975 & ~SPI_CTRLR0_ADDR_L_MASK;
976
977 if (!apply_cmd_length(dev_data, req->cmd_length) ||
978 !apply_addr_length(dev_data, req->addr_length)) {
979 return -EINVAL;
980 }
981
982 if (dev_data->standard_spi &&
983 (req->rx_dummy != 0 || req->tx_dummy != 0)) {
984 LOG_ERR("Dummy cycles unsupported in single line mode");
985 return -EINVAL;
986 } else if (req->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX ||
987 req->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) {
988 LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles",
989 req->rx_dummy, req->tx_dummy);
990 return -EINVAL;
991 }
992
993 dev_data->xfer = *req;
994
995 for (dev_data->packets_done = 0;
996 dev_data->packets_done < dev_data->xfer.num_packet;
997 dev_data->packets_done++) {
998 rc = start_next_packet(dev, K_MSEC(dev_data->xfer.timeout));
999 if (rc < 0) {
1000 return rc;
1001 }
1002 }
1003
1004 return 0;
1005 }
1006
1007 static int api_transceive(const struct device *dev,
1008 const struct mspi_dev_id *dev_id,
1009 const struct mspi_xfer *req)
1010 {
1011 struct mspi_dw_data *dev_data = dev->data;
1012 int rc, rc2;
1013
1014 if (dev_id != dev_data->dev_id) {
1015 LOG_ERR("Controller is not configured for this device");
1016 return -EINVAL;
1017 }
1018
1019 /* TODO: add support for asynchronous transfers */
1020 if (req->async) {
1021 LOG_ERR("Asynchronous transfers are not supported");
1022 return -ENOTSUP;
1023 }
1024
1025 rc = pm_device_runtime_get(dev);
1026 if (rc < 0) {
1027 LOG_ERR("pm_device_runtime_get() failed: %d", rc);
1028 return rc;
1029 }
1030
1031 (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
1032
1033 if (dev_data->suspended) {
1034 rc = -EFAULT;
1035 } else {
1036 rc = _api_transceive(dev, req);
1037 }
1038
1039 k_sem_give(&dev_data->ctx_lock);
1040
1041 rc2 = pm_device_runtime_put(dev);
1042 if (rc2 < 0) {
1043 LOG_ERR("pm_device_runtime_put() failed: %d", rc2);
1044 rc = (rc < 0 ? rc : rc2);
1045 }
1046
1047 return rc;
1048 }
1049
1050 #if defined(CONFIG_MSPI_XIP)
1051 static int _api_xip_config(const struct device *dev,
1052 const struct mspi_dev_id *dev_id,
1053 const struct mspi_xip_cfg *cfg)
1054 {
1055 struct mspi_dw_data *dev_data = dev->data;
1056 int rc;
1057
1058 if (!cfg->enable) {
1059 rc = vendor_specific_xip_disable(dev, dev_id, cfg);
1060 if (rc < 0) {
1061 return rc;
1062 }
1063
1064 dev_data->xip_enabled &= ~BIT(dev_id->dev_idx);
1065
1066 if (!dev_data->xip_enabled) {
1067 write_ssienr(dev, 0);
1068
1069 /* Since XIP is disabled, it is okay for the controller
1070 * to be suspended.
1071 */
1072 rc = pm_device_runtime_put(dev);
1073 if (rc < 0) {
1074 LOG_ERR("pm_device_runtime_put() failed: %d", rc);
1075 return rc;
1076 }
1077 }
1078
1079 return 0;
1080 }
1081
1082 if (!dev_data->xip_enabled) {
1083 struct xip_params *params = &dev_data->xip_params_active;
1084 struct xip_ctrl ctrl = {0};
1085
1086 *params = dev_data->xip_params_stored;
1087
1088 if (!apply_xip_io_mode(dev_data, &ctrl) ||
1089 !apply_xip_cmd_length(dev_data, &ctrl) ||
1090 !apply_xip_addr_length(dev_data, &ctrl)) {
1091 return -EINVAL;
1092 }
1093
1094 if (params->rx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX ||
1095 params->tx_dummy > SPI_CTRLR0_WAIT_CYCLES_MAX) {
1096 LOG_ERR("Unsupported RX (%u) or TX (%u) dummy cycles",
1097 params->rx_dummy, params->tx_dummy);
1098 return -EINVAL;
1099 }
1100
1101 		/* Take an additional PM usage count to prevent the controller
1102 		 * from being suspended as long as XIP is active.
1103 */
1104 rc = pm_device_runtime_get(dev);
1105 if (rc < 0) {
1106 LOG_ERR("pm_device_runtime_get() failed: %d", rc);
1107 return rc;
1108 }
1109
1110 ctrl.read |= FIELD_PREP(XIP_CTRL_WAIT_CYCLES_MASK,
1111 params->rx_dummy);
1112 ctrl.write |= FIELD_PREP(XIP_WRITE_CTRL_WAIT_CYCLES_MASK,
1113 params->tx_dummy);
1114
1115 /* Make sure the baud rate and serial clock phase/polarity
1116 * registers are configured properly. They may not be if
1117 * non-XIP transfers have not been performed yet.
1118 */
1119 write_ctrlr0(dev, dev_data->ctrlr0);
1120 write_baudr(dev, dev_data->baudr);
1121
1122 write_xip_incr_inst(dev, params->read_cmd);
1123 write_xip_wrap_inst(dev, params->read_cmd);
1124 write_xip_ctrl(dev, ctrl.read);
1125 write_xip_write_incr_inst(dev, params->write_cmd);
1126 write_xip_write_wrap_inst(dev, params->write_cmd);
1127 write_xip_write_ctrl(dev, ctrl.write);
1128 } else if (dev_data->xip_params_active.read_cmd !=
1129 dev_data->xip_params_stored.read_cmd ||
1130 dev_data->xip_params_active.write_cmd !=
1131 dev_data->xip_params_stored.write_cmd ||
1132 dev_data->xip_params_active.cmd_length !=
1133 dev_data->xip_params_stored.cmd_length ||
1134 dev_data->xip_params_active.addr_length !=
1135 dev_data->xip_params_stored.addr_length ||
1136 dev_data->xip_params_active.rx_dummy !=
1137 dev_data->xip_params_stored.rx_dummy ||
1138 dev_data->xip_params_active.tx_dummy !=
1139 dev_data->xip_params_stored.tx_dummy) {
1140 LOG_ERR("Conflict with configuration already used for XIP.");
1141 return -EINVAL;
1142 }
1143
1144 rc = vendor_specific_xip_enable(dev, dev_id, cfg);
1145 if (rc < 0) {
1146 return rc;
1147 }
1148
1149 write_ssienr(dev, SSIENR_SSIC_EN_BIT);
1150
1151 dev_data->xip_enabled |= BIT(dev_id->dev_idx);
1152
1153 return 0;
1154 }
1155
1156 static int api_xip_config(const struct device *dev,
1157 const struct mspi_dev_id *dev_id,
1158 const struct mspi_xip_cfg *cfg)
1159 {
1160 struct mspi_dw_data *dev_data = dev->data;
1161 int rc, rc2;
1162
1163 if (cfg->enable && dev_id != dev_data->dev_id) {
1164 LOG_ERR("Controller is not configured for this device");
1165 return -EINVAL;
1166 }
1167
1168 rc = pm_device_runtime_get(dev);
1169 if (rc < 0) {
1170 LOG_ERR("pm_device_runtime_get() failed: %d", rc);
1171 return rc;
1172 }
1173
1174 (void)k_sem_take(&dev_data->ctx_lock, K_FOREVER);
1175
1176 if (dev_data->suspended) {
1177 rc = -EFAULT;
1178 } else {
1179 rc = _api_xip_config(dev, dev_id, cfg);
1180 }
1181
1182 k_sem_give(&dev_data->ctx_lock);
1183
1184 rc2 = pm_device_runtime_put(dev);
1185 if (rc2 < 0) {
1186 LOG_ERR("pm_device_runtime_put() failed: %d", rc2);
1187 rc = (rc < 0 ? rc : rc2);
1188 }
1189
1190 return rc;
1191 }
1192 #endif /* defined(CONFIG_MSPI_XIP) */
1193
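/* Handles PM resume and suspend actions; suspending is refused while XIP
 * is enabled or a transfer is in progress.
 */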
1194 static int dev_pm_action_cb(const struct device *dev,
1195 enum pm_device_action action)
1196 {
1197 struct mspi_dw_data *dev_data = dev->data;
1198
1199 if (action == PM_DEVICE_ACTION_RESUME) {
1200 #if defined(CONFIG_PINCTRL)
1201 const struct mspi_dw_config *dev_config = dev->config;
1202 int rc = pinctrl_apply_state(dev_config->pcfg,
1203 PINCTRL_STATE_DEFAULT);
1204
1205 if (rc < 0) {
1206 LOG_ERR("Cannot apply default pins state (%d)", rc);
1207 return rc;
1208 }
1209 #endif
1210 vendor_specific_resume(dev);
1211
1212 dev_data->suspended = false;
1213
1214 return 0;
1215 }
1216
1217 if (IS_ENABLED(CONFIG_PM_DEVICE) &&
1218 action == PM_DEVICE_ACTION_SUSPEND) {
1219 bool xip_enabled = COND_CODE_1(CONFIG_MSPI_XIP,
1220 (dev_data->xip_enabled != 0),
1221 (false));
1222
1223 #if defined(CONFIG_PINCTRL)
1224 const struct mspi_dw_config *dev_config = dev->config;
1225 int rc = pinctrl_apply_state(dev_config->pcfg,
1226 PINCTRL_STATE_SLEEP);
1227
1228 if (rc < 0) {
1229 LOG_ERR("Cannot apply sleep pins state (%d)", rc);
1230 return rc;
1231 }
1232 #endif
1233 if (xip_enabled ||
1234 k_sem_take(&dev_data->ctx_lock, K_NO_WAIT) != 0) {
1235 LOG_ERR("Controller in use, cannot be suspended");
1236 return -EBUSY;
1237 }
1238
1239 dev_data->suspended = true;
1240
1241 vendor_specific_suspend(dev);
1242
1243 k_sem_give(&dev_data->ctx_lock);
1244
1245 return 0;
1246 }
1247
1248 return -ENOTSUP;
1249 }
1250
1251 static int dev_init(const struct device *dev)
1252 {
1253 struct mspi_dw_data *dev_data = dev->data;
1254 const struct mspi_dw_config *dev_config = dev->config;
1255 const struct gpio_dt_spec *ce_gpio;
1256 int rc;
1257
1258 DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
1259
1260 vendor_specific_init(dev);
1261
1262 dev_config->irq_config();
1263
1264 k_sem_init(&dev_data->finished, 0, 1);
1265 k_sem_init(&dev_data->cfg_lock, 1, 1);
1266 k_sem_init(&dev_data->ctx_lock, 1, 1);
1267
1268 for (ce_gpio = dev_config->ce_gpios;
1269 ce_gpio < &dev_config->ce_gpios[dev_config->ce_gpios_len];
1270 ce_gpio++) {
1271 if (!device_is_ready(ce_gpio->port)) {
1272 LOG_ERR("CE GPIO port %s is not ready",
1273 ce_gpio->port->name);
1274 return -ENODEV;
1275 }
1276
1277 rc = gpio_pin_configure_dt(ce_gpio, GPIO_OUTPUT_INACTIVE);
1278 if (rc < 0) {
1279 return rc;
1280 }
1281 }
1282
1283 #if defined(CONFIG_PINCTRL)
1284 if (IS_ENABLED(CONFIG_PM_DEVICE_RUNTIME)) {
1285 rc = pinctrl_apply_state(dev_config->pcfg, PINCTRL_STATE_SLEEP);
1286 if (rc < 0) {
1287 LOG_ERR("Cannot apply sleep pins state (%d)", rc);
1288 return rc;
1289 }
1290 }
1291 #endif
1292
1293 return pm_device_driver_init(dev, dev_pm_action_cb);
1294 }
1295
1296 static DEVICE_API(mspi, drv_api) = {
1297 .config = api_config,
1298 .dev_config = api_dev_config,
1299 .get_channel_status = api_get_channel_status,
1300 .transceive = api_transceive,
1301 #if defined(CONFIG_MSPI_XIP)
1302 .xip_config = api_xip_config,
1303 #endif
1304 };
1305
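/* Devicetree-based instantiation helpers used by
 * DT_INST_FOREACH_STATUS_OKAY() at the end of this file.
 */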
1306 #define MSPI_DW_INST_IRQ(idx, inst) \
1307 IRQ_CONNECT(DT_INST_IRQ_BY_IDX(inst, idx, irq), \
1308 DT_INST_IRQ_BY_IDX(inst, idx, priority), \
1309 mspi_dw_isr, DEVICE_DT_INST_GET(inst), 0); \
1310 irq_enable(DT_INST_IRQ_BY_IDX(inst, idx, irq))
1311
1312 #define MSPI_DW_MMIO_ROM_INIT(node_id) \
1313 COND_CODE_1(DT_REG_HAS_NAME(node_id, core), \
1314 (Z_DEVICE_MMIO_NAMED_ROM_INITIALIZER(core, node_id)), \
1315 (DEVICE_MMIO_ROM_INIT(node_id)))
1316
1317 #define MSPI_DW_CLOCK_FREQUENCY(inst) \
1318 COND_CODE_1(DT_NODE_HAS_PROP(DT_INST_PHANDLE(inst, clocks), \
1319 clock_frequency), \
1320 (DT_INST_PROP_BY_PHANDLE(inst, clocks, \
1321 clock_frequency)), \
1322 (DT_INST_PROP(inst, clock_frequency)))
1323
1324 #define MSPI_DW_DT_INST_PROP(inst, prop) .prop = DT_INST_PROP(inst, prop)
1325
1326 #define FOREACH_CE_GPIOS_ELEM(inst) \
1327 DT_INST_FOREACH_PROP_ELEM_SEP(inst, ce_gpios, \
1328 GPIO_DT_SPEC_GET_BY_IDX, (,))
1329 #define MSPI_DW_CE_GPIOS(inst) \
1330 .ce_gpios = (const struct gpio_dt_spec []) \
1331 { FOREACH_CE_GPIOS_ELEM(inst) }, \
1332 .ce_gpios_len = DT_INST_PROP_LEN(inst, ce_gpios)
1333
1334 #define TX_FIFO_DEPTH(inst) DT_INST_PROP(inst, fifo_depth)
1335 #define RX_FIFO_DEPTH(inst) DT_INST_PROP_OR(inst, rx_fifo_depth, \
1336 TX_FIFO_DEPTH(inst))
1337 #define MSPI_DW_FIFO_PROPS(inst) \
1338 .tx_fifo_depth_minus_1 = TX_FIFO_DEPTH(inst) - 1, \
1339 .tx_fifo_threshold = \
1340 DT_INST_PROP_OR(inst, tx_fifo_threshold, \
1341 7 * TX_FIFO_DEPTH(inst) / 8 - 1), \
1342 .rx_fifo_threshold = \
1343 DT_INST_PROP_OR(inst, rx_fifo_threshold, \
1344 1 * RX_FIFO_DEPTH(inst) / 8 - 1)
1345
1346 #define MSPI_DW_INST(inst) \
1347 PM_DEVICE_DT_INST_DEFINE(inst, dev_pm_action_cb); \
1348 IF_ENABLED(CONFIG_PINCTRL, (PINCTRL_DT_INST_DEFINE(inst);)) \
1349 static void irq_config##inst(void) \
1350 { \
1351 LISTIFY(DT_INST_NUM_IRQS(inst), \
1352 MSPI_DW_INST_IRQ, (;), inst); \
1353 } \
1354 static struct mspi_dw_data dev##inst##_data; \
1355 static const struct mspi_dw_config dev##inst##_config = { \
1356 MSPI_DW_MMIO_ROM_INIT(DT_DRV_INST(inst)), \
1357 .irq_config = irq_config##inst, \
1358 .clock_frequency = MSPI_DW_CLOCK_FREQUENCY(inst), \
1359 IF_ENABLED(CONFIG_PINCTRL, \
1360 (.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(inst),)) \
1361 IF_ENABLED(DT_INST_NODE_HAS_PROP(inst, ce_gpios), \
1362 (MSPI_DW_CE_GPIOS(inst),)) \
1363 MSPI_DW_FIFO_PROPS(inst), \
1364 DEFINE_REG_ACCESS(inst) \
1365 .sw_multi_periph = \
1366 DT_INST_PROP(inst, software_multiperipheral), \
1367 }; \
1368 DEVICE_DT_INST_DEFINE(inst, \
1369 dev_init, PM_DEVICE_DT_INST_GET(inst), \
1370 &dev##inst##_data, &dev##inst##_config, \
1371 POST_KERNEL, CONFIG_MSPI_INIT_PRIORITY, \
1372 &drv_api);
1373
1374 DT_INST_FOREACH_STATUS_OKAY(MSPI_DW_INST)
1375