/*
 * Copyright (c) 2024, Ambiq Micro Inc. <www.ambiq.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * This driver creates fake MSPI buses which can contain emulated devices,
 * implemented by separate emulation drivers.
 * The API between this driver and its emulators is defined by
 * struct mspi_emul_driver_api.
 */
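
/*
 * Illustrative sketch only (not taken from a real board file): a node for
 * this controller, assuming the zephyr,mspi-emul-controller binding implied
 * by DT_DRV_COMPAT below. Property names mirror the DT_INST_* accessors used
 * in MSPI_CONFIG() further down; the child node and its contents are
 * hypothetical and would use the compatible of a separate emulator driver.
 *
 *	mspi0: mspi-emul-controller {
 *		compatible = "zephyr,mspi-emul-controller";
 *		clock-frequency = <48000000>;
 *		software-multiperipheral;
 *
 *		emulated_device: emul-dev@0 {
 *			reg = <0>;
 *		};
 *	};
 */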

#define DT_DRV_COMPAT zephyr_mspi_emul_controller

#define LOG_LEVEL CONFIG_MSPI_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(mspi_emul_controller);
#include <zephyr/kernel.h>
#include <zephyr/device.h>
#include <zephyr/drivers/emul.h>
#include <zephyr/drivers/mspi.h>
#include <zephyr/drivers/mspi_emul.h>

#define MSPI_MAX_FREQ 250000000
#define MSPI_MAX_DEVICE 2
#define MSPI_TIMEOUT_US 1000000
#define EMUL_MSPI_INST_ID 0

struct mspi_emul_context {
	/* the request entity that currently owns the lock */
	const struct mspi_dev_id *owner;
	/* the current transfer context */
	struct mspi_xfer xfer;
	/* the transfer controls */
	bool asynchronous;
	int packets_done;
	/* the transfer callback and callback context */
	mspi_callback_handler_t callback;
	struct mspi_callback_context *callback_ctx;
	/* the transfer lock */
	struct k_sem lock;
};

struct mspi_emul_data {
	/* list of struct mspi_emul associated with the device */
	sys_slist_t emuls;
	/* common MSPI hardware configurations */
	struct mspi_cfg mspicfg;
	/* device ID of the device currently occupying the bus */
	const struct mspi_dev_id *dev_id;
	/* controller access mutex */
	struct k_mutex lock;
	/* device specific hardware settings */
	struct mspi_dev_cfg dev_cfg;
	/* XIP configurations */
	struct mspi_xip_cfg xip_cfg;
	/* scrambling configurations */
	struct mspi_scramble_cfg scramble_cfg;
	/* timing configurations */
	struct mspi_timing_cfg timing_cfg;
	/* local storage of MSPI callback handlers */
	mspi_callback_handler_t cbs[MSPI_BUS_EVENT_MAX];
	/* local storage of MSPI callback contexts */
	struct mspi_callback_context *cb_ctxs[MSPI_BUS_EVENT_MAX];
	/* local MSPI context */
	struct mspi_emul_context ctx;
};

/**
 * Verify if the device with dev_id is on this MSPI bus.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @return 0 The device is on this MSPI bus.
 * @return -ENODEV The device is not on this MSPI bus.
 */
static inline int mspi_verify_device(const struct device *controller,
				     const struct mspi_dev_id *dev_id)
{
	const struct mspi_emul_data *data = controller->data;
	int device_index = data->mspicfg.num_periph;
	int ret = 0;

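	/*
	 * When CE GPIOs are described for this controller, match the
	 * requester's CE line against the ce_group table and cross-check the
	 * resulting index against dev_id->dev_idx; otherwise only range-check
	 * dev_idx against the number of peripherals.
	 */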
	if (data->mspicfg.num_ce_gpios != 0) {
		for (int i = 0; i < data->mspicfg.num_periph; i++) {
			if (dev_id->ce.port == data->mspicfg.ce_group[i].port &&
			    dev_id->ce.pin == data->mspicfg.ce_group[i].pin &&
			    dev_id->ce.dt_flags == data->mspicfg.ce_group[i].dt_flags) {
				device_index = i;
			}
		}

		if (device_index >= data->mspicfg.num_periph ||
		    device_index != dev_id->dev_idx) {
			LOG_ERR("%u, invalid device ID.", __LINE__);
			return -ENODEV;
		}
	} else {
		if (dev_id->dev_idx >= data->mspicfg.num_periph) {
			LOG_ERR("%u, invalid device ID.", __LINE__);
			return -ENODEV;
		}
	}

	return ret;
}

/**
 * Check if the MSPI bus is busy.
 *
 * @param controller MSPI emulation controller device.
 * @return true The MSPI bus is busy.
 * @return false The MSPI bus is idle.
 */
static inline bool mspi_is_inp(const struct device *controller)
{
	struct mspi_emul_data *data = controller->data;

	return (k_sem_count_get(&data->ctx.lock) == 0);
}

/**
 * Lock MSPI context.
 *
 * @param ctx Pointer to the MSPI context.
 * @param req Pointer to the request entity represented by mspi_dev_id.
 * @param xfer Pointer to the MSPI transfer started by req.
 * @param callback MSPI callback function pointer.
 * @param callback_ctx Pointer to the MSPI callback context.
 * @return 0 if allowed for hardware configuration.
 * @return 1 if not allowed for hardware configuration.
 */
static inline int mspi_context_lock(struct mspi_emul_context *ctx,
				    const struct mspi_dev_id *req,
				    const struct mspi_xfer *xfer,
				    mspi_callback_handler_t callback,
				    struct mspi_callback_context *callback_ctx)
{
	int ret = 0;

	if (k_sem_take(&ctx->lock, K_MSEC(xfer->timeout))) {
		return ret;
	}

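	/*
	 * The return value doubles as the cfg_flag consumed by
	 * mspi_emul_transceive(): 1 requests a call to mspi_xfer_config()
	 * before starting the transfer, 0 skips it.
	 */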
	if (ctx->callback) {

		if ((xfer->tx_dummy == ctx->xfer.tx_dummy) &&
		    (xfer->rx_dummy == ctx->xfer.rx_dummy) &&
		    (xfer->cmd_length == ctx->xfer.cmd_length) &&
		    (xfer->addr_length == ctx->xfer.addr_length)) {
			ret = 1;
		} else {
			ret = 0;
		}
	}

	ctx->owner = req;
	ctx->xfer = *xfer;
	ctx->packets_done = 0;
	ctx->asynchronous = ctx->xfer.async;
	ctx->callback = callback;
	ctx->callback_ctx = callback_ctx;

	return ret;
}

/**
 * Release the MSPI context.
 *
 * @param ctx Pointer to the MSPI context.
 */
static inline void mspi_context_release(struct mspi_emul_context *ctx)
{
	ctx->owner = NULL;
	k_sem_give(&ctx->lock);
}

/**
 * Configure hardware before a transfer.
 *
 * @param controller Pointer to the MSPI controller instance.
 * @param xfer Pointer to the MSPI transfer started by the request entity.
 * @return 0 if successful.
 */
static int mspi_xfer_config(const struct device *controller,
			    const struct mspi_xfer *xfer)
{
	struct mspi_emul_data *data = controller->data;

	data->dev_cfg.cmd_length = xfer->cmd_length;
	data->dev_cfg.addr_length = xfer->addr_length;
	data->dev_cfg.tx_dummy = xfer->tx_dummy;
	data->dev_cfg.rx_dummy = xfer->rx_dummy;

	return 0;
}

/**
 * Check and save dev_cfg to the controller's data->dev_cfg.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param param_mask Macro definition of what is to be configured in cfg.
 * @param dev_cfg The device runtime configuration for the MSPI controller.
 * @return 0 MSPI device configuration successful.
 * @return -Error MSPI device configuration failed.
 */
static inline int mspi_dev_cfg_check_save(const struct device *controller,
					  const enum mspi_dev_cfg_mask param_mask,
					  const struct mspi_dev_cfg *dev_cfg)
{
	struct mspi_emul_data *data = controller->data;

	if (param_mask & MSPI_DEVICE_CONFIG_CE_NUM) {
		data->dev_cfg.ce_num = dev_cfg->ce_num;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_FREQUENCY) {
		if (dev_cfg->freq > MSPI_MAX_FREQ) {
			LOG_ERR("%u, freq is too large.", __LINE__);
			return -ENOTSUP;
		}
		data->dev_cfg.freq = dev_cfg->freq;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_IO_MODE) {
		if (dev_cfg->io_mode >= MSPI_IO_MODE_MAX) {
			LOG_ERR("%u, Invalid io_mode.", __LINE__);
			return -EINVAL;
		}
		data->dev_cfg.io_mode = dev_cfg->io_mode;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_DATA_RATE) {
		if (dev_cfg->data_rate >= MSPI_DATA_RATE_MAX) {
			LOG_ERR("%u, Invalid data_rate.", __LINE__);
			return -EINVAL;
		}
		data->dev_cfg.data_rate = dev_cfg->data_rate;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_CPP) {
		if (dev_cfg->cpp > MSPI_CPP_MODE_3) {
			LOG_ERR("%u, Invalid cpp.", __LINE__);
			return -EINVAL;
		}
		data->dev_cfg.cpp = dev_cfg->cpp;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_ENDIAN) {
		if (dev_cfg->endian > MSPI_XFER_BIG_ENDIAN) {
			LOG_ERR("%u, Invalid endian.", __LINE__);
			return -EINVAL;
		}
		data->dev_cfg.endian = dev_cfg->endian;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_CE_POL) {
		if (dev_cfg->ce_polarity > MSPI_CE_ACTIVE_HIGH) {
			LOG_ERR("%u, Invalid ce_polarity.", __LINE__);
			return -EINVAL;
		}
		data->dev_cfg.ce_polarity = dev_cfg->ce_polarity;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_DQS) {
		if (dev_cfg->dqs_enable && !data->mspicfg.dqs_support) {
			LOG_ERR("%u, DQS mode not supported.", __LINE__);
			return -ENOTSUP;
		}
		data->dev_cfg.dqs_enable = dev_cfg->dqs_enable;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_RX_DUMMY) {
		data->dev_cfg.rx_dummy = dev_cfg->rx_dummy;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_TX_DUMMY) {
		data->dev_cfg.tx_dummy = dev_cfg->tx_dummy;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_READ_CMD) {
		data->dev_cfg.read_cmd = dev_cfg->read_cmd;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_WRITE_CMD) {
		data->dev_cfg.write_cmd = dev_cfg->write_cmd;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_CMD_LEN) {
		data->dev_cfg.cmd_length = dev_cfg->cmd_length;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_ADDR_LEN) {
		data->dev_cfg.addr_length = dev_cfg->addr_length;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_MEM_BOUND) {
		data->dev_cfg.mem_boundary = dev_cfg->mem_boundary;
	}

	if (param_mask & MSPI_DEVICE_CONFIG_BREAK_TIME) {
		data->dev_cfg.time_to_break = dev_cfg->time_to_break;
	}

	return 0;
}

/**
 * Check the transfer context from the request entity.
 *
 * @param xfer Pointer to the MSPI transfer started by the request entity.
 * @return 0 if successful.
 * @return -EINVAL invalid parameter detected.
 */
static inline int mspi_xfer_check(const struct mspi_xfer *xfer)
{
	if (xfer->xfer_mode > MSPI_DMA) {
		LOG_ERR("%u, Invalid xfer xfer_mode.", __LINE__);
		return -EINVAL;
	}

	if (!xfer->packets || !xfer->num_packet) {
		LOG_ERR("%u, Invalid xfer payload.", __LINE__);
		return -EINVAL;
	}

	for (int i = 0; i < xfer->num_packet; ++i) {

		if (!xfer->packets[i].data_buf ||
		    !xfer->packets[i].num_bytes) {
			LOG_ERR("%u, Invalid xfer payload num: %u.", __LINE__, i);
			return -EINVAL;
		}

		if (xfer->packets[i].dir > MSPI_TX) {
			LOG_ERR("%u, Invalid xfer direction.", __LINE__);
			return -EINVAL;
		}

		if (xfer->packets[i].cb_mask > MSPI_BUS_XFER_COMPLETE_CB) {
			LOG_ERR("%u, Invalid xfer cb_mask.", __LINE__);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * find_emul API implementation.
 *
 * @param controller Pointer to the MSPI controller instance.
 * @param dev_idx The device index of an mspi_emul.
 * @return Pointer to an mspi_emul entity if successful.
 * @return NULL if the mspi_emul entity is not found.
 */
static struct mspi_emul *mspi_emul_find(const struct device *controller,
					uint16_t dev_idx)
{
	struct mspi_emul_data *data = controller->data;
	sys_snode_t *node;

	SYS_SLIST_FOR_EACH_NODE(&data->emuls, node) {
		struct mspi_emul *emul;

		emul = CONTAINER_OF(node, struct mspi_emul, node);
		if (emul->dev_idx == dev_idx) {
			return emul;
		}
	}

	return NULL;
}

/**
 * trigger_event API implementation.
 *
 * @param controller Pointer to the MSPI controller instance.
 * @param evt_type The bus event to trigger.
 * @return 0 if successful.
 */
static int emul_mspi_trigger_event(const struct device *controller,
				   enum mspi_bus_event evt_type)
{
	struct mspi_emul_data *data = controller->data;
	struct mspi_emul_context *ctx = &data->ctx;

	mspi_callback_handler_t cb;
	struct mspi_callback_context *cb_context;

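	/*
	 * MSPI_BUS_XFER_COMPLETE is dispatched through the per-transfer
	 * callback captured in the context by mspi_emul_transceive(); all
	 * other event types use the handlers registered via
	 * mspi_emul_register_callback().
	 */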
	if (evt_type == MSPI_BUS_XFER_COMPLETE) {

		if (ctx->callback && ctx->callback_ctx) {

			struct mspi_event *evt = &ctx->callback_ctx->mspi_evt;
			const struct mspi_xfer_packet *packet;

			packet = &ctx->xfer.packets[ctx->packets_done];

			evt->evt_type = MSPI_BUS_XFER_COMPLETE;
			evt->evt_data.controller = controller;
			evt->evt_data.dev_id = ctx->owner;
			evt->evt_data.packet = packet;
			evt->evt_data.packet_idx = ctx->packets_done;
			ctx->packets_done++;

			if (packet->cb_mask == MSPI_BUS_XFER_COMPLETE_CB) {
				cb = ctx->callback;
				cb_context = ctx->callback_ctx;
				cb(cb_context);
			}

		} else {
			LOG_WRN("%u, MSPI_BUS_XFER_COMPLETE callback not registered.", __LINE__);
		}

	} else {

		cb = data->cbs[evt_type];
		cb_context = data->cb_ctxs[evt_type];
		if (cb) {
			cb(cb_context);
		} else {
			LOG_ERR("%u, mspi callback type %u not registered.", __LINE__, evt_type);
			return -EINVAL;
		}
	}

	return 0;
}
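
/*
 * Usage sketch (hypothetical emulator code, not part of this driver): an
 * emulated device that has finished servicing a packet can notify the
 * requester through the controller's extended API, e.g.
 *
 *	const struct emul_mspi_driver_api *api = controller->api;
 *
 *	api->trigger_event(controller, MSPI_BUS_XFER_COMPLETE);
 *
 * Which callback then runs depends on what was registered via
 * mspi_emul_register_callback() or passed to mspi_emul_transceive().
 */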

/**
 * API implementation of mspi_config.
 *
 * @param spec Pointer to the MSPI device tree spec.
 * @return 0 if successful.
 * @return -Error if failed.
 */
static int mspi_emul_config(const struct mspi_dt_spec *spec)
{
	const struct mspi_cfg *config = &spec->config;
	struct mspi_emul_data *data = spec->bus->data;

	int ret = 0;

	if (config->op_mode > MSPI_OP_MODE_PERIPHERAL) {
		LOG_ERR("%u, Invalid MSPI OP mode.", __LINE__);
		return -EINVAL;
	}

	if (config->max_freq > MSPI_MAX_FREQ) {
		LOG_ERR("%u, Invalid MSPI Frequency.", __LINE__);
		return -ENOTSUP;
	}

	if (config->duplex > MSPI_FULL_DUPLEX) {
		LOG_ERR("%u, Invalid MSPI duplexity.", __LINE__);
		return -EINVAL;
	}

	if (config->num_periph > MSPI_MAX_DEVICE) {
		LOG_ERR("%u, Invalid MSPI peripheral number.", __LINE__);
		return -ENOTSUP;
	}

	if (config->num_ce_gpios != 0 &&
	    config->num_ce_gpios != config->num_periph) {
		LOG_ERR("%u, Invalid number of ce_gpios.", __LINE__);
		return -EINVAL;
	}

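	/*
	 * On re-initialization, serialize against other requesters and wait
	 * for any transfer in progress to drain before resetting the context.
	 */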
	if (config->re_init) {
		if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_ERR("%u, Failed to access controller.", __LINE__);
			return -EBUSY;
		}
		while (mspi_is_inp(spec->bus)) {
		}
	}

	/* emulate controller hardware initialization */
	k_busy_wait(10);

	if (!k_sem_count_get(&data->ctx.lock)) {
		data->ctx.owner = NULL;
		k_sem_give(&data->ctx.lock);
	}

	if (config->re_init) {
		k_mutex_unlock(&data->lock);
	}

	data->mspicfg = *config;

	return ret;
}

/**
 * API implementation of mspi_dev_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param param_mask Macro definition of what is to be configured in cfg.
 * @param dev_cfg The device runtime configuration for the MSPI controller.
 *
 * @retval 0 if successful.
 * @retval -EINVAL invalid capabilities, failed to configure device.
 * @retval -ENOTSUP capability not supported by MSPI peripheral.
 */
static int mspi_emul_dev_config(const struct device *controller,
				const struct mspi_dev_id *dev_id,
				const enum mspi_dev_cfg_mask param_mask,
				const struct mspi_dev_cfg *dev_cfg)
{
	struct mspi_emul_data *data = controller->data;
	int ret = 0;

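	/*
	 * If the requester differs from the current bus owner, take the
	 * controller mutex; it stays held across subsequent transfers and is
	 * released in mspi_emul_get_channel_status() or in the error path
	 * below.
	 */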
	if (data->dev_id != dev_id) {
		if (k_mutex_lock(&data->lock, K_MSEC(CONFIG_MSPI_COMPLETION_TIMEOUT_TOLERANCE))) {
			LOG_ERR("%u, Failed to access controller.", __LINE__);
			return -EBUSY;
		}

		ret = mspi_verify_device(controller, dev_id);
		if (ret) {
			goto e_return;
		}
	}

	while (mspi_is_inp(controller)) {
	}

	if (param_mask == MSPI_DEVICE_CONFIG_NONE &&
	    !data->mspicfg.sw_multi_periph) {
		/* Do nothing except obtaining the controller lock */
	} else if (param_mask < MSPI_DEVICE_CONFIG_ALL) {
		if (data->dev_id != dev_id) {
			/* MSPI_DEVICE_CONFIG_ALL should be used */
			LOG_ERR("%u, config failed, must be the same device.", __LINE__);
			ret = -ENOTSUP;
			goto e_return;
		}
		ret = mspi_dev_cfg_check_save(controller, param_mask, dev_cfg);
		if (ret) {
			goto e_return;
		}
	} else if (param_mask == MSPI_DEVICE_CONFIG_ALL) {
		ret = mspi_dev_cfg_check_save(controller, param_mask, dev_cfg);
		if (ret) {
			goto e_return;
		}
		if (data->dev_id != dev_id) {
			/* Conduct device switching */
		}
	} else {
		LOG_ERR("%u, Invalid param_mask.", __LINE__);
		ret = -EINVAL;
		goto e_return;
	}

	data->dev_id = dev_id;
	return ret;

e_return:
	k_mutex_unlock(&data->lock);
	return ret;
}

/**
 * API implementation of mspi_xip_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param xip_cfg The controller XIP configuration for MSPI.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID doesn't match; call mspi_dev_config first.
 */
static int mspi_emul_xip_config(const struct device *controller,
				const struct mspi_dev_id *dev_id,
				const struct mspi_xip_cfg *xip_cfg)
{
	struct mspi_emul_data *data = controller->data;
	int ret = 0;

	if (dev_id != data->dev_id) {
		LOG_ERR("%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	data->xip_cfg = *xip_cfg;
	return ret;
}

/**
 * API implementation of mspi_scramble_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param scramble_cfg The controller scramble configuration for MSPI.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID doesn't match; call mspi_dev_config first.
 */
static int mspi_emul_scramble_config(const struct device *controller,
				     const struct mspi_dev_id *dev_id,
				     const struct mspi_scramble_cfg *scramble_cfg)
{
	struct mspi_emul_data *data = controller->data;
	int ret = 0;

	while (mspi_is_inp(controller)) {
	}

	if (dev_id != data->dev_id) {
		LOG_ERR("%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	data->scramble_cfg = *scramble_cfg;
	return ret;
}

/**
 * API implementation of mspi_timing_config.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param param_mask The macro definition of what should be configured in cfg.
 * @param timing_cfg The controller timing configuration for MSPI.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID doesn't match; call mspi_dev_config first.
 * @retval -ENOTSUP param_mask value is not supported.
 */
static int mspi_emul_timing_config(const struct device *controller,
				   const struct mspi_dev_id *dev_id,
				   const uint32_t param_mask,
				   void *timing_cfg)
{
	struct mspi_emul_data *data = controller->data;
	int ret = 0;

	while (mspi_is_inp(controller)) {
	}

	if (dev_id != data->dev_id) {
		LOG_ERR("%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (param_mask == MSPI_TIMING_PARAM_DUMMY) {
		data->timing_cfg = *(struct mspi_timing_cfg *)timing_cfg;
	} else {
		LOG_ERR("%u, param_mask not supported.", __LINE__);
		return -ENOTSUP;
	}

	return ret;
}

/**
 * API implementation of mspi_get_channel_status.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param ch Not used.
 *
 * @retval 0 if successful.
 * @retval -EBUSY MSPI bus is busy.
 */
static int mspi_emul_get_channel_status(const struct device *controller, uint8_t ch)
{
	struct mspi_emul_data *data = controller->data;

	ARG_UNUSED(ch);

	if (mspi_is_inp(controller)) {
		return -EBUSY;
	}

	k_mutex_unlock(&data->lock);
	data->dev_id = NULL;

	return 0;
}

/**
 * API implementation of mspi_register_callback.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param evt_type The event type associated with the callback.
 * @param cb Pointer to the user implemented callback function.
 * @param ctx Pointer to the callback context.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID doesn't match; call mspi_dev_config first.
 * @retval -ENOTSUP evt_type not supported.
 */
static int mspi_emul_register_callback(const struct device *controller,
				       const struct mspi_dev_id *dev_id,
				       const enum mspi_bus_event evt_type,
				       mspi_callback_handler_t cb,
				       struct mspi_callback_context *ctx)
{
	struct mspi_emul_data *data = controller->data;

	while (mspi_is_inp(controller)) {
	}

	if (dev_id != data->dev_id) {
		LOG_ERR("%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	if (evt_type >= MSPI_BUS_EVENT_MAX) {
		LOG_ERR("%u, callback types not supported.", __LINE__);
		return -ENOTSUP;
	}

	data->cbs[evt_type] = cb;
	data->cb_ctxs[evt_type] = ctx;
	return 0;
}

/**
 * API implementation of mspi_transceive.
 *
 * @param controller Pointer to the device structure for the driver instance.
 * @param dev_id Pointer to the device ID structure from a device.
 * @param xfer Pointer to the MSPI transfer started by dev_id.
 *
 * @retval 0 if successful.
 * @retval -ESTALE device ID doesn't match; call mspi_dev_config first.
 * @retval -Error transfer failed.
 */
static int mspi_emul_transceive(const struct device *controller,
				const struct mspi_dev_id *dev_id,
				const struct mspi_xfer *xfer)
{
	struct mspi_emul_data *data = controller->data;
	struct mspi_emul_context *ctx = &data->ctx;
	struct mspi_emul *emul;
	mspi_callback_handler_t cb = NULL;
	struct mspi_callback_context *cb_ctx = NULL;
	int ret = 0;
	int cfg_flag = 0;

	emul = mspi_emul_find(controller, dev_id->dev_idx);
	if (!emul) {
		LOG_ERR("%u, mspi_emul not found.", __LINE__);
		return -EIO;
	}

	if (dev_id != data->dev_id) {
		LOG_ERR("%u, dev_id doesn't match.", __LINE__);
		return -ESTALE;
	}

	ret = mspi_xfer_check(xfer);
	if (ret) {
		return ret;
	}

	__ASSERT_NO_MSG(emul->api);
	__ASSERT_NO_MSG(emul->api->transceive);

	if (xfer->async) {
		cb = data->cbs[MSPI_BUS_XFER_COMPLETE];
		cb_ctx = data->cb_ctxs[MSPI_BUS_XFER_COMPLETE];
	}

	cfg_flag = mspi_context_lock(ctx, dev_id, xfer, cb, cb_ctx);

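	/*
	 * A cfg_flag of 1 means the per-transfer cmd/addr/dummy settings
	 * should be (re)applied through mspi_xfer_config(); 0 reuses the
	 * existing settings; any other non-zero value is treated as an error.
	 */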
	if (cfg_flag) {
		if (cfg_flag == 1) {
			ret = mspi_xfer_config(controller, xfer);
			if (ret) {
				LOG_ERR("%u, xfer config fail.", __LINE__);
				goto trans_err;
			}
		} else {
			ret = cfg_flag;
			LOG_ERR("%u, xfer fail.", __LINE__);
			goto trans_err;
		}
	}

	ret = emul->api->transceive(emul->target,
				    ctx->xfer.packets,
				    ctx->xfer.num_packet,
				    ctx->asynchronous, MSPI_TIMEOUT_US);

trans_err:
	mspi_context_release(ctx);

	return ret;
}

/**
 * Initialize the emulated MSPI controller and set up its child emulators.
 *
 * @param dev MSPI emulation controller.
 *
 * @retval 0 if successful.
 */
static int mspi_emul_init(const struct device *dev)
{
	struct mspi_emul_data *data = dev->data;
	const struct mspi_dt_spec spec = {
		.bus = dev,
		.config = data->mspicfg,
	};
	int ret = 0;

	ret = mspi_emul_config(&spec);
	if (ret) {
		return ret;
	}

	sys_slist_init(&data->emuls);

	return emul_init_for_bus(dev);
}

/**
 * Register an emulated device on the controller's list of emulators.
 *
 * @param dev MSPI emulation controller.
 * @param emul MSPI emulation device.
 *
 * @retval 0 if successful.
 */
int mspi_emul_register(const struct device *dev, struct mspi_emul *emul)
{
	struct mspi_emul_data *data = dev->data;
	const char *name = emul->target->dev->name;

	sys_slist_append(&data->emuls, &emul->node);

	LOG_INF("Register emulator '%s', id:%x\n", name, emul->dev_idx);

	return 0;
}
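
/*
 * Sketch of how an emulated device typically attaches itself (hypothetical
 * emulator code; my_emul_init, my_emul_api and the dev_idx value are
 * illustrative only):
 *
 *	static int my_emul_init(const struct emul *target, const struct device *bus)
 *	{
 *		static struct mspi_emul emul;
 *
 *		emul.dev_idx = 0;
 *		emul.api = &my_emul_api;
 *		emul.target = target;
 *
 *		return mspi_emul_register(bus, &emul);
 *	}
 */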

/* Device instantiation */
static struct emul_mspi_driver_api emul_mspi_driver_api = {
	.mspi_api = {
		.config = mspi_emul_config,
		.dev_config = mspi_emul_dev_config,
		.xip_config = mspi_emul_xip_config,
		.scramble_config = mspi_emul_scramble_config,
		.timing_config = mspi_emul_timing_config,
		.get_channel_status = mspi_emul_get_channel_status,
		.register_callback = mspi_emul_register_callback,
		.transceive = mspi_emul_transceive,
	},
	.trigger_event = emul_mspi_trigger_event,
	.find_emul = mspi_emul_find,
};

#define MSPI_CONFIG(n)                                                              \
	{                                                                           \
		.channel_num = EMUL_MSPI_INST_ID,                                   \
		.op_mode = DT_INST_ENUM_IDX_OR(n, op_mode, MSPI_OP_MODE_CONTROLLER),\
		.duplex = DT_INST_ENUM_IDX_OR(n, duplex, MSPI_HALF_DUPLEX),         \
		.max_freq = DT_INST_PROP(n, clock_frequency),                       \
		.dqs_support = DT_INST_PROP_OR(n, dqs_support, false),              \
		.sw_multi_periph = DT_INST_PROP(n, software_multiperipheral),       \
	}

#define EMUL_LINK_AND_COMMA(node_id)                                                \
	{                                                                           \
		.dev = DEVICE_DT_GET(node_id),                                      \
	},

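/*
 * Per-instance instantiation: build the child emulator list consumed by
 * emul_init_for_bus(), the CE GPIO table and the controller data/context,
 * then register the device with the emulated MSPI driver API.
 */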
#define MSPI_EMUL_INIT(n)                                                           \
	static const struct emul_link_for_bus emuls_##n[] = {                       \
		DT_FOREACH_CHILD_STATUS_OKAY(DT_DRV_INST(n), EMUL_LINK_AND_COMMA)}; \
	static struct emul_list_for_bus mspi_emul_cfg_##n = {                       \
		.children = emuls_##n,                                              \
		.num_children = ARRAY_SIZE(emuls_##n),                              \
	};                                                                          \
	static struct gpio_dt_spec ce_gpios##n[] = MSPI_CE_GPIOS_DT_SPEC_INST_GET(n); \
	static struct mspi_emul_data mspi_emul_data_##n = {                         \
		.mspicfg = MSPI_CONFIG(n),                                          \
		.mspicfg.ce_group = (struct gpio_dt_spec *)ce_gpios##n,             \
		.mspicfg.num_ce_gpios = ARRAY_SIZE(ce_gpios##n),                    \
		.mspicfg.num_periph = DT_INST_CHILD_NUM(n),                         \
		.mspicfg.re_init = false,                                           \
		.dev_id = 0,                                                        \
		.lock = Z_MUTEX_INITIALIZER(mspi_emul_data_##n.lock),               \
		.dev_cfg = {0},                                                     \
		.xip_cfg = {0},                                                     \
		.scramble_cfg = {0},                                                \
		.cbs = {0},                                                         \
		.cb_ctxs = {0},                                                     \
		.ctx.lock = Z_SEM_INITIALIZER(mspi_emul_data_##n.ctx.lock, 0, 1),   \
		.ctx.callback = 0,                                                  \
		.ctx.callback_ctx = 0,                                              \
	};                                                                          \
	DEVICE_DT_INST_DEFINE(n,                                                    \
			      &mspi_emul_init,                                      \
			      NULL,                                                 \
			      &mspi_emul_data_##n,                                  \
			      &mspi_emul_cfg_##n,                                   \
			      POST_KERNEL,                                          \
			      CONFIG_MSPI_INIT_PRIORITY,                            \
			      &emul_mspi_driver_api);

DT_INST_FOREACH_STATUS_OKAY(MSPI_EMUL_INIT)