/*
 * Copyright (c) 2024, Ambiq Micro Inc. <www.ambiq.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT ambiq_mspi_controller

#include <zephyr/logging/log.h>
#include <zephyr/logging/log_instance.h>
LOG_MODULE_REGISTER(mspi_ambiq_ap3);
#include <zephyr/kernel.h>
#include <zephyr/sys/util.h>
#include <zephyr/pm/device.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/drivers/mspi.h>
#include <zephyr/drivers/gpio.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>

#include "mspi_ambiq.h"

#define MSPI_MAX_FREQ       48000000
#define MSPI_MAX_DEVICE     2
#define MSPI_TIMEOUT_US     1000000
#define PWRCTRL_MAX_WAIT_US 5
#define MSPI_BUSY           BIT(2)

typedef int (*mspi_ambiq_pwr_func_t)(void);
typedef void (*irq_config_func_t)(void);

struct mspi_context {
	const struct mspi_dev_id *owner;

	struct mspi_xfer xfer;

	int packets_left;
	int packets_done;

	mspi_callback_handler_t callback;
	struct mspi_callback_context *callback_ctx;
	bool asynchronous;

	struct k_sem lock;
};

struct mspi_ambiq_config {
	uint32_t reg_base;
	uint32_t reg_size;

	struct mspi_cfg mspicfg;

	const struct pinctrl_dev_config *pcfg;
	irq_config_func_t irq_cfg_func;

	LOG_INSTANCE_PTR_DECLARE(log);
};

struct mspi_ambiq_data {
	void *mspiHandle;
	am_hal_mspi_dev_config_t hal_dev_cfg;

	struct mspi_dev_id *dev_id;
	struct k_mutex lock;

	struct mspi_dev_cfg dev_cfg;
	struct mspi_xip_cfg xip_cfg;
	struct mspi_scramble_cfg scramble_cfg;

	mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX];
	struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX];

	struct mspi_context ctx;
};

static int mspi_set_freq(const struct mspi_ambiq_config *cfg, uint32_t freq)
{
	uint32_t d = MSPI_MAX_FREQ / freq;

	switch (d) {
	case AM_HAL_MSPI_CLK_48MHZ:
	case AM_HAL_MSPI_CLK_24MHZ:
	case AM_HAL_MSPI_CLK_16MHZ:
	case AM_HAL_MSPI_CLK_12MHZ:
	case AM_HAL_MSPI_CLK_8MHZ:
	case AM_HAL_MSPI_CLK_6MHZ:
	case AM_HAL_MSPI_CLK_4MHZ:
	case AM_HAL_MSPI_CLK_3MHZ:
		break;
	default:
		LOG_INST_ERR(cfg->log, "%u, frequency not supported.", __LINE__);
		d = 0;
		break;
	}

	return d;
}
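
/*
 * Illustrative note (not part of the driver): the HAL clock enums encode the
 * divisor of the 48 MHz source clock, so the switch above simply checks that
 * MSPI_MAX_FREQ / freq lands on a supported divisor. Assuming the Apollo3
 * HAL enum values, e.g.:
 *
 *	mspi_set_freq(cfg, 24000000); // 48 MHz / 24 MHz = 2 -> AM_HAL_MSPI_CLK_24MHZ
 *	mspi_set_freq(cfg, 5000000);  // 48 MHz / 5 MHz = 9 -> unsupported, returns 0
 */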

static am_hal_mspi_device_e mspi_set_line(const struct mspi_ambiq_config *cfg,
					  enum mspi_io_mode io_mode,
					  enum mspi_data_rate data_rate,
					  uint8_t ce_num)
{
	if (data_rate != MSPI_DATA_RATE_SINGLE) {
		LOG_INST_ERR(cfg->log, "%u, incorrect data rate, only SDR is supported.", __LINE__);
		return AM_HAL_MSPI_FLASH_MAX;
	}

	if (ce_num == 0) {
		switch (io_mode) {
		case MSPI_IO_MODE_SINGLE:
			return AM_HAL_MSPI_FLASH_SERIAL_CE0;
		case MSPI_IO_MODE_DUAL:
			return AM_HAL_MSPI_FLASH_DUAL_CE0;
		case MSPI_IO_MODE_DUAL_1_1_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE0_1_1_2;
		case MSPI_IO_MODE_DUAL_1_2_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE0_1_2_2;
		case MSPI_IO_MODE_QUAD:
			return AM_HAL_MSPI_FLASH_QUAD_CE0;
		case MSPI_IO_MODE_QUAD_1_1_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE0_1_1_4;
		case MSPI_IO_MODE_QUAD_1_4_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE0_1_4_4;
		case MSPI_IO_MODE_OCTAL:
			return AM_HAL_MSPI_FLASH_OCTAL_CE0;
		default:
			return AM_HAL_MSPI_FLASH_MAX;
		}
	} else if (ce_num == 1) {
		switch (io_mode) {
		case MSPI_IO_MODE_SINGLE:
			return AM_HAL_MSPI_FLASH_SERIAL_CE1;
		case MSPI_IO_MODE_DUAL:
			return AM_HAL_MSPI_FLASH_DUAL_CE1;
		case MSPI_IO_MODE_DUAL_1_1_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE1_1_1_2;
		case MSPI_IO_MODE_DUAL_1_2_2:
			return AM_HAL_MSPI_FLASH_DUAL_CE1_1_2_2;
		case MSPI_IO_MODE_QUAD:
			return AM_HAL_MSPI_FLASH_QUAD_CE1;
		case MSPI_IO_MODE_QUAD_1_1_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE1_1_1_4;
		case MSPI_IO_MODE_QUAD_1_4_4:
			return AM_HAL_MSPI_FLASH_QUAD_CE1_1_4_4;
		case MSPI_IO_MODE_OCTAL:
			return AM_HAL_MSPI_FLASH_OCTAL_CE1;
		default:
			return AM_HAL_MSPI_FLASH_MAX;
		}
	} else {
		return AM_HAL_MSPI_FLASH_MAX;
	}
}
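
/*
 * Illustrative note (not part of the driver): the (io_mode, ce_num) pair maps
 * one-to-one onto a HAL device enum per chip-enable line, and anything other
 * than SDR on CE0/CE1 falls through to AM_HAL_MSPI_FLASH_MAX. For example:
 *
 *	mspi_set_line(cfg, MSPI_IO_MODE_QUAD, MSPI_DATA_RATE_SINGLE, 1);
 *	// -> AM_HAL_MSPI_FLASH_QUAD_CE1
 *	mspi_set_line(cfg, MSPI_IO_MODE_OCTAL, MSPI_DATA_RATE_DUAL, 0);
 *	// -> AM_HAL_MSPI_FLASH_MAX (only SDR is supported)
 */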

static am_hal_mspi_dma_boundary_e mspi_set_mem_boundary(uint32_t mem_boundary)
{
	switch (mem_boundary) {
	case 0:
		return AM_HAL_MSPI_BOUNDARY_NONE;
	case 32:
		return AM_HAL_MSPI_BOUNDARY_BREAK32;
	case 64:
		return AM_HAL_MSPI_BOUNDARY_BREAK64;
	case 128:
		return AM_HAL_MSPI_BOUNDARY_BREAK128;
	case 256:
		return AM_HAL_MSPI_BOUNDARY_BREAK256;
	case 512:
		return AM_HAL_MSPI_BOUNDARY_BREAK512;
	case 1024:
		return AM_HAL_MSPI_BOUNDARY_BREAK1K;
	case 2048:
		return AM_HAL_MSPI_BOUNDARY_BREAK2K;
	case 4096:
		return AM_HAL_MSPI_BOUNDARY_BREAK4K;
	case 8192:
		return AM_HAL_MSPI_BOUNDARY_BREAK8K;
	case 16384:
		return AM_HAL_MSPI_BOUNDARY_BREAK16K;
	default:
		return AM_HAL_MSPI_BOUNDARY_MAX;
	}
}
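
/*
 * Illustrative note (not part of the driver): mem_boundary is the byte
 * boundary at which DMA bursts must break (typically the device page size).
 * Only the powers of two listed above map to HAL enums, e.g. 0 -> no
 * boundary, 256 -> AM_HAL_MSPI_BOUNDARY_BREAK256, while an unlisted value
 * such as 96 yields AM_HAL_MSPI_BOUNDARY_MAX and is rejected later in
 * mspi_ambiq_dev_config().
 */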

static inline void mspi_context_ce_control(struct mspi_context *ctx, bool on)
{
	if (ctx->owner) {
		if (ctx->xfer.hold_ce &&
		    ctx->xfer.ce_sw_ctrl.gpio.port != NULL) {
			if (on) {
				gpio_pin_set_dt(&ctx->xfer.ce_sw_ctrl.gpio, 1);
				k_busy_wait(ctx->xfer.ce_sw_ctrl.delay);
			} else {
				k_busy_wait(ctx->xfer.ce_sw_ctrl.delay);
				gpio_pin_set_dt(&ctx->xfer.ce_sw_ctrl.gpio, 0);
			}
		}
	}
}

static inline void mspi_context_release(struct mspi_context *ctx)
{
	ctx->owner = NULL;
	k_sem_give(&ctx->lock);
}

static inline void mspi_context_unlock_unconditionally(struct mspi_context *ctx)
{
	mspi_context_ce_control(ctx, false);

	if (!k_sem_count_get(&ctx->lock)) {
		ctx->owner = NULL;
		k_sem_give(&ctx->lock);
	}
}

static inline int mspi_context_lock(struct mspi_context *ctx,
				    const struct mspi_dev_id *req,
				    const struct mspi_xfer *xfer,
				    mspi_callback_handler_t callback,
				    struct mspi_callback_context *callback_ctx,
				    bool lockon)
{
	int ret = 1;

	if ((k_sem_count_get(&ctx->lock) == 0) && !lockon &&
	    (ctx->owner == req)) {
		return 0;
	}

	if (k_sem_take(&ctx->lock, K_MSEC(xfer->timeout))) {
		return -EBUSY;
	}
	if (ctx->xfer.async) {
		if ((xfer->tx_dummy == ctx->xfer.tx_dummy) &&
		    (xfer->rx_dummy == ctx->xfer.rx_dummy) &&
		    (xfer->cmd_length == ctx->xfer.cmd_length) &&
		    (xfer->addr_length == ctx->xfer.addr_length)) {
			ret = 0;
		} else if (ctx->packets_left == 0) {
			if (ctx->callback_ctx) {
				volatile struct mspi_event_data *evt_data;

				evt_data = &ctx->callback_ctx->mspi_evt.evt_data;
				while (evt_data->status != 0) {
				}
				ret = 1;
			} else {
				ret = 0;
			}
		} else {
			return -EIO;
		}
	}
	ctx->owner = req;
	ctx->xfer = *xfer;
	ctx->packets_done = 0;
	ctx->packets_left = ctx->xfer.num_packet;
	ctx->callback = callback;
	ctx->callback_ctx = callback_ctx;
	return ret;
}
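
/*
 * Illustrative note (not part of the driver): callers treat the return value
 * of mspi_context_lock() as a tri-state, as mspi_pio_transceive() and
 * mspi_dma_transceive() do below:
 *
 *	int cfg_flag = mspi_context_lock(ctx, dev_id, xfer, cb, cb_ctx, true);
 *
 *	if (cfg_flag == 1)      // lock taken: (re)program the transfer config
 *	else if (cfg_flag == 0) // lock held with a matching config: reuse it
 *	else                    // negative errno: propagate (-EBUSY/-EIO)
 */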

static inline bool mspi_is_inp(const struct device *controller)
{
	struct mspi_ambiq_data *data = controller->data;

	return (k_sem_count_get(&data->ctx.lock) == 0);
}

static inline int mspi_verify_device(const struct device *controller,
				     const struct mspi_dev_id *dev_id)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	int device_index = cfg->mspicfg.num_periph;
	int ret = 0;

	for (int i = 0; i < cfg->mspicfg.num_periph; i++) {
		if (dev_id->ce.port == cfg->mspicfg.ce_group[i].port &&
		    dev_id->ce.pin == cfg->mspicfg.ce_group[i].pin &&
		    dev_id->ce.dt_flags == cfg->mspicfg.ce_group[i].dt_flags) {
			device_index = i;
		}
	}

	if (device_index >= cfg->mspicfg.num_periph ||
	    device_index != dev_id->dev_idx) {
		LOG_INST_ERR(cfg->log, "%u, invalid device ID.", __LINE__);
		return -ENODEV;
	}

	return ret;
}

static int mspi_ambiq_deinit(const struct device *controller)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	int ret = 0;

	if (!data->mspiHandle) {
		LOG_INST_ERR(cfg->log, "%u, the MSPI is not yet initialized.", __LINE__);
		return -ENODEV;
	}

	if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
		LOG_INST_ERR(cfg->log, "%u, fail to gain controller access.", __LINE__);
		return -EBUSY;
	}

	ret = am_hal_mspi_interrupt_disable(data->mspiHandle, 0xFFFFFFFF);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable interrupt, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_interrupt_clear(data->mspiHandle, 0xFFFFFFFF);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to clear interrupt, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_DEEPSLEEP, false);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.",
			     __LINE__, ret);
		ret = -EHOSTDOWN;
		goto e_deinit_return;
	}

	ret = am_hal_mspi_deinitialize(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to deinit MSPI.", __LINE__);
		ret = -ENODEV;
		goto e_deinit_return;
	}
	return ret;

e_deinit_return:
	k_mutex_unlock(&data->lock);
	return ret;
}

/** DMA specific config */
static int mspi_xfer_config(const struct device *controller,
			    const struct mspi_xfer *xfer)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	if (data->scramble_cfg.enable) {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, unable to complete scramble config:%d.",
			     __LINE__, data->scramble_cfg.enable);
		return -EHOSTDOWN;
	}

	if (xfer->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, cmd_length is too large.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->cmd_length == 0) {
		hal_dev_cfg.bSendInstr = false;
	} else {
		hal_dev_cfg.bSendInstr = true;
		hal_dev_cfg.eInstrCfg = xfer->cmd_length - 1;
	}

	if (xfer->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, addr_length is too large.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->addr_length == 0) {
		hal_dev_cfg.bSendAddr = false;
	} else {
		hal_dev_cfg.bSendAddr = true;
		hal_dev_cfg.eAddrCfg = xfer->addr_length - 1;
	}

	hal_dev_cfg.bTurnaround = (xfer->rx_dummy != 0);
	hal_dev_cfg.ui8TurnAround = (uint8_t)xfer->rx_dummy;
	hal_dev_cfg.bEnWriteLatency = (xfer->tx_dummy != 0);
	hal_dev_cfg.ui8WriteLatency = (uint8_t)xfer->tx_dummy;

	ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}
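
/*
 * Illustrative note (assuming the HAL instruction/address enums are
 * zero-based byte counts, i.e. AM_HAL_MSPI_INSTR_1_BYTE == 0 and
 * AM_HAL_MSPI_ADDR_1_BYTE == 0): a 2-byte command with a 3-byte address
 * maps as
 *
 *	hal_dev_cfg.eInstrCfg = 2 - 1;	// AM_HAL_MSPI_INSTR_2_BYTE
 *	hal_dev_cfg.eAddrCfg = 3 - 1;	// AM_HAL_MSPI_ADDR_3_BYTE
 *
 * while a cmd_length/addr_length of 0 suppresses the phase entirely via
 * bSendInstr/bSendAddr.
 */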

static int mspi_ambiq_config(const struct mspi_dt_spec *spec)
{
	const struct mspi_cfg *config = &spec->config;
	const struct mspi_ambiq_config *cfg = spec->bus->config;
	struct mspi_ambiq_data *data = spec->bus->data;

	int ret = 0;

	if (config->op_mode != MSPI_OP_MODE_CONTROLLER) {
		LOG_INST_ERR(cfg->log, "%u, only support MSPI controller mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->max_freq > MSPI_MAX_FREQ) {
		LOG_INST_ERR(cfg->log, "%u, max_freq too large.", __LINE__);
		return -ENOTSUP;
	}

	if (config->duplex != MSPI_HALF_DUPLEX) {
		LOG_INST_ERR(cfg->log, "%u, only support half duplex mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->dqs_support) {
		LOG_INST_ERR(cfg->log, "%u, only support non-DQS mode.", __LINE__);
		return -ENOTSUP;
	}

	if (config->re_init) {
		ret = mspi_ambiq_deinit(spec->bus);
		if (ret) {
			return ret;
		}
	}

	ret = am_hal_mspi_initialize(config->channel_num, &data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to initialize MSPI, code:%d.",
			     __LINE__, ret);
		return -EPERM;
	}

	ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_WAKE, false);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret) {
		return ret;
	}

	ret = am_hal_mspi_interrupt_clear(data->mspiHandle, AM_HAL_MSPI_INT_CQUPD |
							    AM_HAL_MSPI_INT_ERR);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to clear interrupt, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_CQUPD |
							     AM_HAL_MSPI_INT_ERR);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to turn on interrupt, code:%d.",
			     __LINE__, ret);
		return -EHOSTDOWN;
	}

	cfg->irq_cfg_func();

	mspi_context_unlock_unconditionally(&data->ctx);

	if (config->re_init) {
		k_mutex_unlock(&data->lock);
	}

	return ret;
}

static int mspi_ambiq_dev_config(const struct device *controller,
				 const struct mspi_dev_id *dev_id,
				 const enum mspi_dev_cfg_mask param_mask,
				 const struct mspi_dev_cfg *dev_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	int ret = 0;

	if (data->dev_id != dev_id) {
		if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_INST_ERR(cfg->log, "%u, fail to gain controller access.", __LINE__);
			return -EBUSY;
		}

		ret = mspi_verify_device(controller, dev_id);
		if (ret) {
			goto e_return;
		}
	}

	if (mspi_is_inp(controller)) {
		ret = -EBUSY;
		goto e_return;
	}

	if (param_mask == MSPI_DEVICE_CONFIG_NONE &&
	    !cfg->mspicfg.sw_multi_periph) {
		/* Do nothing except obtaining the controller lock */
		data->dev_id = (struct mspi_dev_id *)dev_id;
		return ret;

	} else if (param_mask != MSPI_DEVICE_CONFIG_ALL) {
		if (data->dev_id != dev_id) {
			LOG_INST_ERR(cfg->log, "%u, config failed, must be the same device.",
				     __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if ((param_mask & (~(MSPI_DEVICE_CONFIG_FREQUENCY |
				     MSPI_DEVICE_CONFIG_IO_MODE |
				     MSPI_DEVICE_CONFIG_CE_NUM |
				     MSPI_DEVICE_CONFIG_DATA_RATE |
				     MSPI_DEVICE_CONFIG_CMD_LEN |
				     MSPI_DEVICE_CONFIG_ADDR_LEN)))) {
			LOG_INST_ERR(cfg->log, "%u, config type not supported.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) {
			hal_dev_cfg.eClockFreq = mspi_set_freq(cfg, dev_cfg->freq);
			if (hal_dev_cfg.eClockFreq == 0) {
				ret = -ENOTSUP;
				goto e_return;
			}
			ret = am_hal_mspi_control(data->mspiHandle,
						  AM_HAL_MSPI_REQ_CLOCK_CONFIG,
						  &hal_dev_cfg.eClockFreq);
			if (ret) {
				LOG_INST_ERR(cfg->log, "%u, failed to configure eClockFreq.",
					     __LINE__);
				ret = -EHOSTDOWN;
				goto e_return;
			}
			data->dev_cfg.freq = dev_cfg->freq;
		}

		if ((param_mask & MSPI_DEVICE_CONFIG_IO_MODE) ||
		    (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) ||
		    (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE)) {
			hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode,
								  dev_cfg->data_rate,
								  dev_cfg->ce_num);
			if (hal_dev_cfg.eDeviceConfig == AM_HAL_MSPI_FLASH_MAX) {
				ret = -ENOTSUP;
				goto e_return;
			}
			ret = am_hal_mspi_control(data->mspiHandle,
						  AM_HAL_MSPI_REQ_DEVICE_CONFIG,
						  &hal_dev_cfg.eDeviceConfig);
			if (ret) {
				LOG_INST_ERR(cfg->log, "%u, failed to configure device.", __LINE__);
				ret = -EHOSTDOWN;
				goto e_return;
			}
			data->dev_cfg.io_mode = dev_cfg->io_mode;
			data->dev_cfg.data_rate = dev_cfg->data_rate;
			data->dev_cfg.ce_num = dev_cfg->ce_num;
		}

		if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) {
			if (dev_cfg->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1 ||
			    dev_cfg->cmd_length == 0) {
				LOG_INST_ERR(cfg->log, "%u, invalid cmd_length.", __LINE__);
				ret = -ENOTSUP;
				goto e_return;
			}
			hal_dev_cfg.eInstrCfg = dev_cfg->cmd_length - 1;
			ret = am_hal_mspi_control(data->mspiHandle,
						  AM_HAL_MSPI_REQ_ISIZE_SET,
						  &hal_dev_cfg.eInstrCfg);
			if (ret) {
				LOG_INST_ERR(cfg->log, "%u, failed to configure cmd_length.",
					     __LINE__);
				ret = -EHOSTDOWN;
				goto e_return;
			}
			data->dev_cfg.cmd_length = dev_cfg->cmd_length;
		}

		if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) {
			if (dev_cfg->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1 ||
			    dev_cfg->addr_length == 0) {
				LOG_INST_ERR(cfg->log, "%u, invalid addr_length.", __LINE__);
				ret = -ENOTSUP;
				goto e_return;
			}
			hal_dev_cfg.eAddrCfg = dev_cfg->addr_length - 1;
			ret = am_hal_mspi_control(data->mspiHandle,
						  AM_HAL_MSPI_REQ_ASIZE_SET,
						  &hal_dev_cfg.eAddrCfg);
			if (ret) {
				LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.",
					     __LINE__);
				ret = -EHOSTDOWN;
				goto e_return;
			}
			data->dev_cfg.addr_length = dev_cfg->addr_length;
		}

	} else {

		if (data->dev_id != dev_id) {
			ret = pinctrl_apply_state(cfg->pcfg,
						  PINCTRL_STATE_PRIV_START + dev_id->dev_idx);
			if (ret) {
				goto e_return;
			}
		}

		if (memcmp(&data->dev_cfg, dev_cfg, sizeof(struct mspi_dev_cfg)) == 0) {
			/** Nothing to config */
			data->dev_id = (struct mspi_dev_id *)dev_id;
			return ret;
		}

		if (dev_cfg->endian != MSPI_XFER_LITTLE_ENDIAN) {
			LOG_INST_ERR(cfg->log, "%u, only support little endian.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		if (dev_cfg->dqs_enable && !cfg->mspicfg.dqs_support) {
			LOG_INST_ERR(cfg->log, "%u, only support non-DQS mode.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		hal_dev_cfg.eSpiMode = dev_cfg->cpp;
		hal_dev_cfg.bEnWriteLatency = (dev_cfg->tx_dummy != 0);
		hal_dev_cfg.ui8WriteLatency = dev_cfg->tx_dummy;
		hal_dev_cfg.bTurnaround = (dev_cfg->rx_dummy != 0);
		hal_dev_cfg.ui8TurnAround = dev_cfg->rx_dummy;

		hal_dev_cfg.eClockFreq = mspi_set_freq(cfg, dev_cfg->freq);
		if (hal_dev_cfg.eClockFreq == 0) {
			ret = -ENOTSUP;
			goto e_return;
		}

		hal_dev_cfg.eDeviceConfig = mspi_set_line(cfg, dev_cfg->io_mode, dev_cfg->data_rate,
							  dev_cfg->ce_num);
		if (hal_dev_cfg.eDeviceConfig == AM_HAL_MSPI_FLASH_MAX) {
			ret = -ENOTSUP;
			goto e_return;
		}

		if (dev_cfg->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1) {
			LOG_INST_ERR(cfg->log, "%u, cmd_length too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}
		if (dev_cfg->cmd_length == 0) {
			hal_dev_cfg.bSendInstr = false;
		} else {
			hal_dev_cfg.bSendInstr = true;
			hal_dev_cfg.eInstrCfg = dev_cfg->cmd_length - 1;
		}

		if (dev_cfg->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
			LOG_INST_ERR(cfg->log, "%u, addr_length too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}
		if (dev_cfg->addr_length == 0) {
			hal_dev_cfg.bSendAddr = false;
		} else {
			hal_dev_cfg.bSendAddr = true;
			hal_dev_cfg.eAddrCfg = dev_cfg->addr_length - 1;
		}

		hal_dev_cfg.ui8ReadInstr = (uint8_t)dev_cfg->read_cmd;
		hal_dev_cfg.ui8WriteInstr = (uint8_t)dev_cfg->write_cmd;

		hal_dev_cfg.eDMABoundary = mspi_set_mem_boundary(dev_cfg->mem_boundary);
		if (hal_dev_cfg.eDMABoundary >= AM_HAL_MSPI_BOUNDARY_MAX) {
			LOG_INST_ERR(cfg->log, "%u, mem_boundary too large.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}

		/** ui16DMATimeLimit unit is in 0.1us */
		hal_dev_cfg.ui16DMATimeLimit = dev_cfg->time_to_break * 10;

		ret = am_hal_mspi_disable(data->mspiHandle);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", __LINE__, ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}

		ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", __LINE__,
				     ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}

		ret = am_hal_mspi_enable(data->mspiHandle);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", __LINE__, ret);
			ret = -EHOSTDOWN;
			goto e_return;
		}
		data->dev_cfg = *dev_cfg;
		data->dev_id = (struct mspi_dev_id *)dev_id;
	}
	data->hal_dev_cfg = hal_dev_cfg;

	return ret;

e_return:
	k_mutex_unlock(&data->lock);
	return ret;
}
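
/*
 * Illustrative usage sketch (values hypothetical): a peripheral driver
 * normally claims the controller once with a full configuration, then
 * adjusts individual parameters through the same entry point:
 *
 *	struct mspi_dev_cfg flash_cfg = {
 *		.freq = 24000000,
 *		.io_mode = MSPI_IO_MODE_QUAD,
 *		.data_rate = MSPI_DATA_RATE_SINGLE,
 *		.ce_num = 0,
 *		...
 *	};
 *
 *	mspi_dev_config(controller, &dev_id, MSPI_DEVICE_CONFIG_ALL, &flash_cfg);
 *	flash_cfg.freq = 12000000;
 *	mspi_dev_config(controller, &dev_id, MSPI_DEVICE_CONFIG_FREQUENCY, &flash_cfg);
 */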

static int mspi_ambiq_xip_config(const struct device *controller,
				 const struct mspi_dev_id *dev_id,
				 const struct mspi_xip_cfg *xip_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (xip_cfg->enable) {
		eRequest = AM_HAL_MSPI_REQ_XIP_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_XIP_DIS;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, unable to complete xip config:%d.", __LINE__,
			     xip_cfg->enable);
		return -EHOSTDOWN;
	}

	data->xip_cfg = *xip_cfg;
	return ret;
}

static int mspi_ambiq_scramble_config(const struct device *controller,
				      const struct mspi_dev_id *dev_id,
				      const struct mspi_scramble_cfg *scramble_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	am_hal_mspi_request_e eRequest;
	int ret = 0;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (scramble_cfg->enable) {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_EN;
	} else {
		eRequest = AM_HAL_MSPI_REQ_SCRAMB_DIS;
	}

	ret = am_hal_mspi_disable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to disable MSPI, code:%d.", __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_control(data->mspiHandle, eRequest, NULL);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, unable to complete scramble config:%d.", __LINE__,
			     scramble_cfg->enable);
		return -EHOSTDOWN;
	}

	hal_dev_cfg.scramblingStartAddr = 0 + scramble_cfg->address_offset;
	hal_dev_cfg.scramblingEndAddr = hal_dev_cfg.scramblingStartAddr + scramble_cfg->size;

	ret = am_hal_mspi_device_configure(data->mspiHandle, &hal_dev_cfg);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure MSPI, code:%d.", __LINE__, ret);
		return -EHOSTDOWN;
	}

	ret = am_hal_mspi_enable(data->mspiHandle);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to enable MSPI, code:%d.", __LINE__, ret);
		return -EHOSTDOWN;
	}

	data->scramble_cfg = *scramble_cfg;
	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}

static int mspi_ambiq_timing_config(const struct device *controller,
				    const struct mspi_dev_id *dev_id,
				    const uint32_t param_mask,
				    void *timing_cfg)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	am_hal_mspi_dev_config_t hal_dev_cfg = data->hal_dev_cfg;
	struct mspi_ambiq_timing_cfg *time_cfg = timing_cfg;
	am_hal_mspi_timing_scan_t timing;
	int ret = 0;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (param_mask & (~(MSPI_AMBIQ_SET_WLC | MSPI_AMBIQ_SET_RLC))) {
		LOG_INST_ERR(cfg->log, "%u, config type not supported.", __LINE__);
		return -ENOTSUP;
	}

	if (param_mask & MSPI_AMBIQ_SET_WLC) {
		if (time_cfg->ui8WriteLatency) {
			hal_dev_cfg.bEnWriteLatency = true;
		} else {
			hal_dev_cfg.bEnWriteLatency = false;
		}
		hal_dev_cfg.ui8WriteLatency = time_cfg->ui8WriteLatency;
	}

	if (param_mask & MSPI_AMBIQ_SET_RLC) {
		if (time_cfg->ui8TurnAround) {
			hal_dev_cfg.bTurnaround = true;
		} else {
			hal_dev_cfg.bTurnaround = false;
		}
		hal_dev_cfg.ui8TurnAround = time_cfg->ui8TurnAround;
	}

	timing.ui8Turnaround = hal_dev_cfg.ui8TurnAround;
	timing.ui8WriteLatency = hal_dev_cfg.ui8WriteLatency;

	ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_TIMING_SCAN, &timing);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, fail to configure timing.", __LINE__);
		return -EHOSTDOWN;
	}

	data->hal_dev_cfg = hal_dev_cfg;
	return ret;
}
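
/*
 * Illustrative usage sketch: this vendor-specific hook is reached through
 * mspi_timing_config() with the Ambiq mask bits from mspi_ambiq.h, e.g. to
 * set a 4-cycle read latency only:
 *
 *	struct mspi_ambiq_timing_cfg tcfg = { .ui8TurnAround = 4 };
 *
 *	mspi_timing_config(controller, dev_id, MSPI_AMBIQ_SET_RLC, &tcfg);
 */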

static int mspi_ambiq_get_channel_status(const struct device *controller, uint8_t ch)
{
	ARG_UNUSED(ch);

	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	int ret = 0;

	if (sys_read32(cfg->reg_base) & MSPI_BUSY) {
		ret = -EBUSY;
	}

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	data->dev_id = NULL;
	k_mutex_unlock(&data->lock);

	return ret;
}

static void mspi_ambiq_isr(const struct device *dev)
{
	struct mspi_ambiq_data *data = dev->data;
	uint32_t ui32Status;

	am_hal_mspi_interrupt_status_get(data->mspiHandle, &ui32Status, false);
	am_hal_mspi_interrupt_clear(data->mspiHandle, ui32Status);
	am_hal_mspi_interrupt_service(data->mspiHandle, ui32Status);
}

/** Manage sync dma transceive */
static void hal_mspi_callback(void *pCallbackCtxt, uint32_t status)
{
	const struct device *controller = pCallbackCtxt;
	struct mspi_ambiq_data *data = controller->data;

	data->ctx.packets_done++;
}

static int mspi_pio_prepare(const struct device *controller,
			    am_hal_mspi_pio_transfer_t *trans)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	const struct mspi_xfer *xfer = &data->ctx.xfer;
	int ret = 0;

	trans->bScrambling = false;
	trans->bSendAddr = (xfer->addr_length != 0);
	trans->bSendInstr = (xfer->cmd_length != 0);
	trans->bTurnaround = (xfer->rx_dummy != 0);
	trans->bEnWRLatency = (xfer->tx_dummy != 0);
	trans->bDCX = false;
	trans->bQuadCmd = false;
	trans->bContinue = false;

	if (xfer->cmd_length > AM_HAL_MSPI_INSTR_2_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, invalid cmd_length.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->cmd_length != 0) {
		am_hal_mspi_instr_e eInstrCfg = xfer->cmd_length - 1;

		ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ISIZE_SET, &eInstrCfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to configure cmd_length.",
				     __LINE__);
			return -EHOSTDOWN;
		}
		data->hal_dev_cfg.eInstrCfg = eInstrCfg;
	}
	data->dev_cfg.cmd_length = xfer->cmd_length;

	if (xfer->addr_length > AM_HAL_MSPI_ADDR_4_BYTE + 1) {
		LOG_INST_ERR(cfg->log, "%u, invalid addr_length.", __LINE__);
		return -ENOTSUP;
	}
	if (xfer->addr_length != 0) {
		am_hal_mspi_addr_e eAddrCfg = xfer->addr_length - 1;

		ret = am_hal_mspi_control(data->mspiHandle, AM_HAL_MSPI_REQ_ASIZE_SET, &eAddrCfg);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to configure addr_length.", __LINE__);
			return -EHOSTDOWN;
		}
		data->hal_dev_cfg.eAddrCfg = eAddrCfg;
	}
	data->dev_cfg.addr_length = xfer->addr_length;

	return ret;
}

static int mspi_pio_transceive(const struct device *controller,
			       const struct mspi_xfer *xfer,
			       mspi_callback_handler_t cb,
			       struct mspi_callback_context *cb_ctx)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	struct mspi_context *ctx = &data->ctx;
	const struct mspi_xfer_packet *packet;
	uint32_t packet_idx;
	am_hal_mspi_pio_transfer_t trans;
	int ret = 0;
	int cfg_flag = 0;

	if (xfer->num_packet == 0 ||
	    !xfer->packets ||
	    xfer->timeout > CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE) {
		return -EFAULT;
	}

	cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true);
	/** For async transfers, the user must ensure that, when cfg_flag == 0, the
	 * dummy cycles and the instruction/address lengths in mspi_xfer match those
	 * of the previous call if that transfer has not finished yet.
	 */
	if (cfg_flag) {
		if (cfg_flag == 1) {
			ret = mspi_pio_prepare(controller, &trans);
			if (ret) {
				goto pio_err;
			}
		} else {
			ret = cfg_flag;
			goto pio_err;
		}
	}

	if (!ctx->xfer.async) {

		while (ctx->packets_left > 0) {
			packet_idx = ctx->xfer.num_packet - ctx->packets_left;
			packet = &ctx->xfer.packets[packet_idx];
			trans.eDirection = packet->dir;
			trans.ui16DeviceInstr = (uint16_t)packet->cmd;
			trans.ui32DeviceAddr = packet->address;
			trans.ui32NumBytes = packet->num_bytes;
			trans.pui32Buffer = (uint32_t *)packet->data_buf;

			ret = am_hal_mspi_blocking_transfer(data->mspiHandle, &trans,
							    MSPI_TIMEOUT_US);
			ctx->packets_left--;
			if (ret) {
				ret = -EIO;
				goto pio_err;
			}
		}

	} else {

		ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, failed to enable interrupt.", __LINE__);
			ret = -EHOSTDOWN;
			goto pio_err;
		}

		while (ctx->packets_left > 0) {
			packet_idx = ctx->xfer.num_packet - ctx->packets_left;
			packet = &ctx->xfer.packets[packet_idx];
			trans.eDirection = packet->dir;
			trans.ui16DeviceInstr = (uint16_t)packet->cmd;
			trans.ui32DeviceAddr = packet->address;
			trans.ui32NumBytes = packet->num_bytes;
			trans.pui32Buffer = (uint32_t *)packet->data_buf;

			if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE;
				ctx->callback_ctx->mspi_evt.evt_data.controller = controller;
				ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner;
				ctx->callback_ctx->mspi_evt.evt_data.packet = packet;
				ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx;
				ctx->callback_ctx->mspi_evt.evt_data.status = ~0;
			}

			am_hal_mspi_callback_t callback = NULL;

			if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				callback = (am_hal_mspi_callback_t)ctx->callback;
			}

			ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_PIO,
							       callback, (void *)ctx->callback_ctx);
			ctx->packets_left--;
			if (ret) {
				if (ret == AM_HAL_STATUS_OUT_OF_RANGE) {
					ret = -ENOMEM;
				} else {
					ret = -EIO;
				}
				goto pio_err;
			}
		}
	}

pio_err:
	mspi_context_release(ctx);
	return ret;
}
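
/*
 * Illustrative usage sketch (opcode and lengths hypothetical): a synchronous
 * single-packet PIO read through the generic API ends up in
 * mspi_pio_transceive() above:
 *
 *	uint8_t buf[16];
 *	struct mspi_xfer_packet pkt = {
 *		.dir = MSPI_RX, .cmd = 0x0B, .address = 0,
 *		.num_bytes = sizeof(buf), .data_buf = buf,
 *	};
 *	struct mspi_xfer xfer = {
 *		.xfer_mode = MSPI_PIO, .packets = &pkt, .num_packet = 1,
 *		.cmd_length = 1, .addr_length = 3, .rx_dummy = 8,
 *		.timeout = 10, .async = false,
 *	};
 *
 *	mspi_transceive(controller, dev_id, &xfer);
 */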

static int mspi_dma_transceive(const struct device *controller,
			       const struct mspi_xfer *xfer,
			       mspi_callback_handler_t cb,
			       struct mspi_callback_context *cb_ctx)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	struct mspi_context *ctx = &data->ctx;
	am_hal_mspi_dma_transfer_t trans;
	int ret = 0;
	int cfg_flag = 0;

	if (xfer->num_packet == 0 ||
	    !xfer->packets ||
	    xfer->timeout > CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE) {
		return -EFAULT;
	}

	cfg_flag = mspi_context_lock(ctx, data->dev_id, xfer, cb, cb_ctx, true);
	/** For async transfers, the user must ensure that, when cfg_flag == 0, the
	 * dummy cycles and the instruction/address lengths in mspi_xfer match those
	 * of the previous call if that transfer has not finished yet.
	 */
	if (cfg_flag) {
		if (cfg_flag == 1) {
			ret = mspi_xfer_config(controller, xfer);
			if (ret) {
				goto dma_err;
			}
		} else {
			ret = cfg_flag;
			goto dma_err;
		}
	}

	ret = am_hal_mspi_interrupt_enable(data->mspiHandle, AM_HAL_MSPI_INT_DMACMP);
	if (ret) {
		LOG_INST_ERR(cfg->log, "%u, failed to enable interrupt.", __LINE__);
		ret = -EHOSTDOWN;
		goto dma_err;
	}

	while (ctx->packets_left > 0) {
		uint32_t packet_idx = ctx->xfer.num_packet - ctx->packets_left;
		const struct mspi_xfer_packet *packet;

		packet = &ctx->xfer.packets[packet_idx];
		trans.ui8Priority = ctx->xfer.priority;
		trans.eDirection = packet->dir;
		trans.ui32TransferCount = packet->num_bytes;
		trans.ui32DeviceAddress = packet->address;
		trans.ui32SRAMAddress = (uint32_t)packet->data_buf;
		trans.ui32PauseCondition = 0;
		trans.ui32StatusSetClr = 0;

		if (ctx->xfer.async) {

			if (ctx->callback && packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				ctx->callback_ctx->mspi_evt.evt_type = MSPI_BUS_XFER_COMPLETE;
				ctx->callback_ctx->mspi_evt.evt_data.controller = controller;
				ctx->callback_ctx->mspi_evt.evt_data.dev_id = data->ctx.owner;
				ctx->callback_ctx->mspi_evt.evt_data.packet = packet;
				ctx->callback_ctx->mspi_evt.evt_data.packet_idx = packet_idx;
				ctx->callback_ctx->mspi_evt.evt_data.status = ~0;
			}

			am_hal_mspi_callback_t callback = NULL;

			if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				callback = (am_hal_mspi_callback_t)ctx->callback;
			}

			ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA,
							       callback, (void *)ctx->callback_ctx);
		} else {
			ret = am_hal_mspi_nonblocking_transfer(data->mspiHandle, &trans, MSPI_DMA,
							       hal_mspi_callback,
							       (void *)controller);
		}
		ctx->packets_left--;
		if (ret) {
			if (ret == AM_HAL_STATUS_OUT_OF_RANGE) {
				ret = -ENOMEM;
			} else {
				ret = -EIO;
			}
			goto dma_err;
		}
	}

	if (!ctx->xfer.async) {
		while (ctx->packets_done < ctx->xfer.num_packet) {
			k_busy_wait(10);
		}
	}

dma_err:
	mspi_context_release(ctx);
	return ret;
}

static int mspi_ambiq_transceive(const struct device *controller,
				 const struct mspi_dev_id *dev_id,
				 const struct mspi_xfer *xfer)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	mspi_callback_handler_t cb = NULL;
	struct mspi_callback_context *cb_ctx = NULL;

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (xfer->async) {
		cb = data->cbs[MSPI_BUS_XFER_COMPLETE];
		cb_ctx = data->cb_ctxs[MSPI_BUS_XFER_COMPLETE];
	}

	if (xfer->xfer_mode == MSPI_PIO) {
		return mspi_pio_transceive(controller, xfer, cb, cb_ctx);
	} else if (xfer->xfer_mode == MSPI_DMA) {
		return mspi_dma_transceive(controller, xfer, cb, cb_ctx);
	} else {
		return -EIO;
	}
}

static int mspi_ambiq_register_callback(const struct device *controller,
					const struct mspi_dev_id *dev_id,
					const enum mspi_bus_event evt_type,
					mspi_callback_handler_t cb,
					struct mspi_callback_context *ctx)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	if (dev_id != data->dev_id) {
		LOG_INST_ERR(cfg->log, "%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (evt_type != MSPI_BUS_XFER_COMPLETE) {
		LOG_INST_ERR(cfg->log, "%u, callback type not supported.", __LINE__);
		return -ENOTSUP;
	}

	data->cbs[evt_type] = cb;
	data->cb_ctxs[evt_type] = ctx;
	return 0;
}
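
/*
 * Illustrative usage sketch (handler name hypothetical): an application hooks
 * the completion event before issuing async transfers, since only
 * MSPI_BUS_XFER_COMPLETE is accepted above:
 *
 *	static struct mspi_callback_context cb_ctx;
 *
 *	mspi_register_callback(controller, dev_id, MSPI_BUS_XFER_COMPLETE,
 *			       xfer_done_handler, &cb_ctx);
 */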

#if CONFIG_PM_DEVICE
static int mspi_ambiq_pm_action(const struct device *controller, enum pm_device_action action)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	struct mspi_ambiq_data *data = controller->data;
	int ret = 0;

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	switch (action) {
	case PM_DEVICE_ACTION_TURN_ON:
		ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_WAKE, true);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to power on MSPI, code:%d.", __LINE__,
				     ret);
			return -EHOSTDOWN;
		}
		break;

	case PM_DEVICE_ACTION_TURN_OFF:
		ret = am_hal_mspi_power_control(data->mspiHandle, AM_HAL_SYSCTRL_DEEPSLEEP, true);
		if (ret) {
			LOG_INST_ERR(cfg->log, "%u, fail to power off MSPI, code:%d.", __LINE__,
				     ret);
			return -EHOSTDOWN;
		}
		break;

	default:
		return -ENOTSUP;
	}

	return 0;
}
#endif

static int mspi_ambiq_init(const struct device *controller)
{
	const struct mspi_ambiq_config *cfg = controller->config;
	const struct mspi_dt_spec spec = {
		.bus = controller,
		.config = cfg->mspicfg,
	};

	return mspi_ambiq_config(&spec);
}

static struct mspi_driver_api mspi_ambiq_driver_api = {
	.config = mspi_ambiq_config,
	.dev_config = mspi_ambiq_dev_config,
	.xip_config = mspi_ambiq_xip_config,
	.scramble_config = mspi_ambiq_scramble_config,
	.timing_config = mspi_ambiq_timing_config,
	.get_channel_status = mspi_ambiq_get_channel_status,
	.register_callback = mspi_ambiq_register_callback,
	.transceive = mspi_ambiq_transceive,
};

#define MSPI_PINCTRL_STATE_INIT(state_idx, node_id)				\
	COND_CODE_1(Z_PINCTRL_SKIP_STATE(state_idx, node_id), (),		\
		({								\
			.id = state_idx,					\
			.pins = Z_PINCTRL_STATE_PINS_NAME(state_idx, node_id),	\
			.pin_cnt = ARRAY_SIZE(Z_PINCTRL_STATE_PINS_NAME(state_idx, node_id)) \
		}))

#define MSPI_PINCTRL_STATES_DEFINE(node_id)					\
	static const struct pinctrl_state					\
	Z_PINCTRL_STATES_NAME(node_id)[] = {					\
		LISTIFY(DT_NUM_PINCTRL_STATES(node_id),				\
			MSPI_PINCTRL_STATE_INIT, (,), node_id)			\
	};

#define MSPI_PINCTRL_DT_DEFINE(node_id)						\
	LISTIFY(DT_NUM_PINCTRL_STATES(node_id),					\
		Z_PINCTRL_STATE_PINS_DEFINE, (;), node_id);			\
	MSPI_PINCTRL_STATES_DEFINE(node_id)					\
	Z_PINCTRL_DEV_CONFIG_STATIC Z_PINCTRL_DEV_CONFIG_CONST			\
	struct pinctrl_dev_config Z_PINCTRL_DEV_CONFIG_NAME(node_id) =		\
		Z_PINCTRL_DEV_CONFIG_INIT(node_id)

#define MSPI_CONFIG(n)								\
	{									\
		.channel_num = (DT_INST_REG_ADDR(n) - REG_MSPI_BASEADDR) /	\
			       (DT_INST_REG_SIZE(n) * 4),			\
		.op_mode = MSPI_OP_MODE_CONTROLLER,				\
		.duplex = MSPI_HALF_DUPLEX,					\
		.max_freq = MSPI_MAX_FREQ,					\
		.dqs_support = false,						\
		.num_periph = DT_INST_CHILD_NUM(n),				\
		.sw_multi_periph = DT_INST_PROP(n, software_multiperipheral),	\
	}
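
/*
 * Illustrative note (stride assumption): channel_num is recovered from the
 * register map on the premise that MSPI instances sit at a fixed stride of
 * DT_INST_REG_SIZE(n) * 4 above REG_MSPI_BASEADDR; e.g. with a 0x400 reg
 * size, the instance at REG_MSPI_BASEADDR + 2 * 0x1000 resolves to channel 2.
 */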

#define MSPI_HAL_DEVICE_CONFIG(n, cmdq, cmdq_size)				\
	{									\
		.ui8WriteLatency = 0,						\
		.ui8TurnAround = 0,						\
		.eAddrCfg = 0,							\
		.eInstrCfg = 0,							\
		.ui8ReadInstr = 0,						\
		.ui8WriteInstr = 0,						\
		.eDeviceConfig = AM_HAL_MSPI_FLASH_SERIAL_CE0,			\
		.eSpiMode = AM_HAL_MSPI_SPI_MODE_0,				\
		.eClockFreq = MSPI_MAX_FREQ / DT_INST_PROP_OR(n,		\
							      clock_frequency,	\
							      MSPI_MAX_FREQ),	\
		.bEnWriteLatency = false,					\
		.bSendAddr = false,						\
		.bSendInstr = false,						\
		.bTurnaround = false,						\
		.bEmulateDDR = false,						\
		.ui16DMATimeLimit = 0,						\
		.eDMABoundary = AM_HAL_MSPI_BOUNDARY_NONE,			\
		.ui32TCBSize = cmdq_size,					\
		.pTCB = cmdq,							\
		.scramblingStartAddr = 0,					\
		.scramblingEndAddr = 0,						\
	}

#define AMBIQ_MSPI_DEFINE(n)							\
	LOG_INSTANCE_REGISTER(DT_DRV_INST(n), mspi##n, CONFIG_MSPI_LOG_LEVEL);	\
	MSPI_PINCTRL_DT_DEFINE(DT_DRV_INST(n));					\
	static void mspi_ambiq_irq_cfg_func_##n(void)				\
	{									\
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority),		\
			    mspi_ambiq_isr, DEVICE_DT_INST_GET(n), 0);		\
		irq_enable(DT_INST_IRQN(n));					\
	}									\
	static uint32_t mspi_ambiq_cmdq##n[DT_INST_PROP_OR(n, cmdq_buffer_size, 1024) / 4] \
	__attribute__((section(DT_INST_PROP_OR(n, cmdq_buffer_location, ".mspi_buff")))); \
	static struct gpio_dt_spec ce_gpios##n[] = MSPI_CE_GPIOS_DT_SPEC_INST_GET(n); \
	static struct mspi_ambiq_data mspi_ambiq_data##n = {			\
		.mspiHandle = NULL,						\
		.hal_dev_cfg = MSPI_HAL_DEVICE_CONFIG(n, mspi_ambiq_cmdq##n,	\
					DT_INST_PROP_OR(n, cmdq_buffer_size, 1024)), \
		.dev_id = 0,							\
		.lock = Z_MUTEX_INITIALIZER(mspi_ambiq_data##n.lock),		\
		.dev_cfg = {0},							\
		.xip_cfg = {0},							\
		.scramble_cfg = {0},						\
		.cbs = {0},							\
		.cb_ctxs = {0},							\
		.ctx.lock = Z_SEM_INITIALIZER(mspi_ambiq_data##n.ctx.lock, 0, 1), \
		.ctx.callback = 0,						\
		.ctx.callback_ctx = 0,						\
	};									\
	static const struct mspi_ambiq_config mspi_ambiq_config##n = {		\
		.reg_base = DT_INST_REG_ADDR(n),				\
		.reg_size = DT_INST_REG_SIZE(n),				\
		.mspicfg = MSPI_CONFIG(n),					\
		.mspicfg.ce_group = (struct gpio_dt_spec *)ce_gpios##n,		\
		.mspicfg.num_ce_gpios = ARRAY_SIZE(ce_gpios##n),		\
		.mspicfg.re_init = false,					\
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),			\
		.irq_cfg_func = mspi_ambiq_irq_cfg_func_##n,			\
		LOG_INSTANCE_PTR_INIT(log, DT_DRV_INST(n), mspi##n)		\
	};									\
	PM_DEVICE_DT_INST_DEFINE(n, mspi_ambiq_pm_action);			\
	DEVICE_DT_INST_DEFINE(n,						\
			      mspi_ambiq_init,					\
			      PM_DEVICE_DT_INST_GET(n),				\
			      &mspi_ambiq_data##n,				\
			      &mspi_ambiq_config##n,				\
			      POST_KERNEL,					\
			      CONFIG_MSPI_INIT_PRIORITY,			\
			      &mspi_ambiq_driver_api);

DT_INST_FOREACH_STATUS_OKAY(AMBIQ_MSPI_DEFINE)