/*
 * Copyright 2022-2023 NXP
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
#define DT_DRV_COMPAT nxp_s32_spi

#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/pinctrl.h>
#include "spi_nxp_s32.h"

/*
 * Per-instance state of the NXP low-level (Spi_Ip) driver. It is reached
 * into directly below to control the KeepCs flag, which the HAL exposes
 * no public API for.
 */
extern Spi_Ip_StateStructureType * Spi_Ip_apxStateStructureArray[SPI_INSTANCE_COUNT];
14
spi_nxp_s32_last_packet(struct spi_nxp_s32_data * data)15 static bool spi_nxp_s32_last_packet(struct spi_nxp_s32_data *data)
16 {
17 struct spi_context *ctx = &data->ctx;
18
19 if (ctx->tx_count <= 1U && ctx->rx_count <= 1U) {
20 if (!spi_context_tx_on(ctx) && (data->transfer_len == ctx->rx_len)) {
21 return true;
22 }
23
24 if (!spi_context_rx_on(ctx) && (data->transfer_len == ctx->tx_len)) {
25 return true;
26 }
27
28 if ((ctx->rx_len == ctx->tx_len) && (data->transfer_len == ctx->tx_len)) {
29 return true;
30 }
31 }
32
33 return false;
34 }
35
/* A transfer is done once neither TX nor RX has pending data. */
static inline bool spi_nxp_s32_transfer_done(struct spi_context *ctx)
{
	bool tx_pending = spi_context_tx_on(ctx);
	bool rx_pending = spi_context_rx_on(ctx);

	return !(tx_pending || rx_pending);
}
40
/*
 * Start transmission of the next contiguous chunk of the ongoing transfer.
 *
 * In interrupt mode, this only kicks off the asynchronous transfer and
 * returns; completion is reported via the callback registered in config.
 * In polling mode, it busy-waits until the low-level driver leaves the
 * BUSY state, servicing its buffers in the loop.
 *
 * Returns 0 on success, -EIO if the transfer could not start or (polling
 * mode only) ended in SPI_IP_FAULT.
 */
static int spi_nxp_s32_transfer_next_packet(const struct device *dev)
{
	const struct spi_nxp_s32_config *config = dev->config;
	struct spi_nxp_s32_data *data = dev->data;

	Spi_Ip_StatusType status;
	Spi_Ip_CallbackType data_cb;

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	data_cb = config->cb;
#else
	data_cb = NULL;
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	/* Chunk = largest contiguous span, clamped to one hardware package */
	data->transfer_len = spi_context_max_continuous_chunk(&data->ctx);
	data->transfer_len = MIN(data->transfer_len,
					SPI_NXP_S32_MAX_BYTES_PER_PACKAGE(data->bytes_per_frame));

	/*
	 * Keep CS signal asserted until the last package, there is no other way
	 * than directly intervening to internal state of low level driver
	 */
	Spi_Ip_apxStateStructureArray[config->spi_hw_cfg->Instance]->KeepCs =
						!spi_nxp_s32_last_packet(data);

	status = Spi_Ip_AsyncTransmit(&data->transfer_cfg, (uint8_t *)data->ctx.tx_buf,
					data->ctx.rx_buf, data->transfer_len, data_cb);

	if (status) {
		LOG_ERR("Transfer could not start");
		return -EIO;
	}

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	return 0;
#else

	/* Polling mode: drive the low-level driver until it is no longer busy */
	while (Spi_Ip_GetStatus(config->spi_hw_cfg->Instance) == SPI_IP_BUSY) {
		Spi_Ip_ManageBuffers(config->spi_hw_cfg->Instance);
	}

	if (Spi_Ip_GetStatus(config->spi_hw_cfg->Instance) == SPI_IP_FAULT) {
		return -EIO;
	}

	return 0;
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */
}
89
/*
 * The function to get Scaler and Prescaler for corresponding registers
 * to configure the baudrate for the transmission. The real frequency is
 * computed to ensure it will always be equal to, or the nearest
 * approximation lower than, the requested one.
 */
static void spi_nxp_s32_getbestfreq(uint32_t clock_frequency,
				    uint32_t requested_baud,
				    struct spi_nxp_s32_baudrate_param *best_baud)
{
	uint8_t scaler;
	uint8_t prescaler;

	uint32_t low, high;
	uint32_t curr_freq;

	uint32_t best_freq = 0U;

	/* Divider values selected by the PBR register field, by field value */
	static const uint8_t prescaler_arr[SPI_NXP_S32_NUM_PRESCALER] = {2U, 3U, 5U, 7U};

	/* Divider values selected by the BR register field, ascending */
	static const uint16_t scaller_arr[SPI_NXP_S32_NUM_SCALER] = {
		2U, 4U, 6U, 8U, 16U, 32U, 64U, 128U, 256U, 512U, 1024U, 2048U,
		4096U, 8192U, 16384U, 32768U
	};

	for (prescaler = 0U; prescaler < SPI_NXP_S32_NUM_PRESCALER; prescaler++) {
		low = 0U;
		high = SPI_NXP_S32_NUM_SCALER - 1U;

		/*
		 * Binary search the scaler table for the highest frequency not
		 * above the requested baudrate.
		 * NOTE(review): previously commented as "golden section
		 * search", but the midpoint split below is a plain binary
		 * search.
		 */
		do {
			scaler = (low + high) / 2U;

			curr_freq = clock_frequency * 1U /
					(prescaler_arr[prescaler] * scaller_arr[scaler]);

			/*
			 * If the scaler make current frequency higher than the
			 * expected one, skip the next step
			 */
			if (curr_freq > requested_baud) {
				low = scaler;
				continue;
			} else {
				high = scaler;
			}

			/* Here curr_freq <= requested_baud, so closer means larger */
			if ((requested_baud - best_freq) > (requested_baud - curr_freq)) {
				best_freq = curr_freq;
				best_baud->prescaler = prescaler;
				best_baud->scaler = scaler;
			}

			if (best_freq == requested_baud) {
				break;
			}

		} while ((high - low) > 1U);

		/* Two candidates remain: also evaluate the one not just tested */
		if ((high - low) <= 1U) {

			if (high == scaler) {
				/* use low value */
				scaler = low;
			} else {
				scaler = high;
			}

			curr_freq = clock_frequency * 1U /
					(prescaler_arr[prescaler] * scaller_arr[scaler]);

			if (curr_freq <= requested_baud) {

				if ((requested_baud - best_freq) > (requested_baud - curr_freq)) {
					best_freq = curr_freq;
					best_baud->prescaler = prescaler;
					best_baud->scaler = scaler;
				}
			}
		}

		/* An exact match cannot be improved on; stop scanning prescalers */
		if (best_freq == requested_baud) {
			break;
		}
	}

	best_baud->frequency = best_freq;
}
178
/*
 * The function to get Scaler and Prescaler for corresponding registers
 * to configure the delay for the transmission. The real delay is computed
 * to ensure it will always be equal to, or the nearest approximation
 * higher than, the expected one. In the worst case, use the longest
 * delay available.
 */
static void spi_nxp_s32_getbestdelay(uint32_t clock_frequency, uint32_t requested_delay,
					uint8_t *best_scaler, uint8_t *best_prescaler)
{
	uint32_t current_delay;
	uint8_t scaler, prescaler;
	uint32_t low, high;

	/* Sentinel: "no achievable delay >= requested found yet" */
	uint32_t best_delay = 0xFFFFFFFFU;

	/* The scaler array is a power of two, so does not need to be defined */
	static const uint8_t prescaler_arr[SPI_NXP_S32_NUM_PRESCALER] = {1U, 3U, 5U, 7U};

	/* Work in MHz so delays come out in nanoseconds below */
	clock_frequency = clock_frequency / MHZ(1);

	for (prescaler = 0; prescaler < SPI_NXP_S32_NUM_PRESCALER; prescaler++) {
		low = 0U;
		high = SPI_NXP_S32_NUM_SCALER - 1U;

		/* Binary search for the smallest delay >= requested_delay */
		do {
			scaler = (low + high) / 2U;

			/* delay[ns] = prescaler * 2^(scaler+1) / clock[MHz] * 1000 */
			current_delay = NSEC_PER_USEC * prescaler_arr[prescaler]
					* (1U << (scaler + 1)) / clock_frequency;

			/*
			 * If the scaler make current delay smaller than
			 * the expected one, skip the next step
			 */
			if (current_delay < requested_delay) {
				low = scaler;
				continue;
			} else {
				high = scaler;
			}

			/* Here current_delay >= requested_delay: closer means smaller */
			if ((best_delay - requested_delay) > (current_delay - requested_delay)) {
				best_delay = current_delay;
				*best_prescaler = prescaler;
				*best_scaler = scaler;
			}

			if (best_delay == requested_delay) {
				break;
			}

		} while ((high - low) > 1U);

		/* Two candidates remain: also evaluate the one not just tested */
		if ((high - low) <= 1U) {

			if (high == scaler) {
				/* use low value */
				scaler = low;
			} else {
				scaler = high;
			}

			current_delay = NSEC_PER_USEC * prescaler_arr[prescaler]
					* (1U << (scaler + 1)) / clock_frequency;

			if (current_delay >= requested_delay) {
				if ((best_delay - requested_delay) >
					(current_delay - requested_delay)) {

					best_delay = current_delay;
					*best_prescaler = prescaler;
					*best_scaler = scaler;
				}
			}
		}

		/* An exact match cannot be improved on; stop scanning prescalers */
		if (best_delay == requested_delay) {
			break;
		}
	}

	if (best_delay == 0xFFFFFFFFU) {
		/* Use the delay as much as possible */
		*best_prescaler = SPI_NXP_S32_NUM_PRESCALER - 1U;
		*best_scaler = SPI_NXP_S32_NUM_SCALER - 1U;
	}
}
266
/*
 * Validate @p spi_cfg against the controller's capabilities, program the
 * transfer configuration (CTAR/PUSHR command) accordingly and cache the
 * configuration in the SPI context.
 *
 * Returns 0 on success, -ENOTSUP for an unsupported combination of
 * settings, or a negative errno propagated from the clock driver.
 */
static int spi_nxp_s32_configure(const struct device *dev,
				 const struct spi_config *spi_cfg)
{
	const struct spi_nxp_s32_config *config = dev->config;
	struct spi_nxp_s32_data *data = dev->data;

	bool clk_phase, clk_polarity;
	bool lsb, hold_cs;
	bool slave_mode, cs_active_high;

	uint8_t frame_size;

	struct spi_nxp_s32_baudrate_param best_baud = {0};
	uint32_t clock_rate;
	int err;

	if (spi_context_configured(&data->ctx, spi_cfg)) {
		/* This configuration is already in use */
		return 0;
	}

	err = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_rate);
	if (err) {
		LOG_ERR("Failed to get clock frequency");
		return err;
	}

	/* Decode the requested operation word */
	clk_phase = !!(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPHA);
	clk_polarity = !!(SPI_MODE_GET(spi_cfg->operation) & SPI_MODE_CPOL);

	hold_cs = !!(spi_cfg->operation & SPI_HOLD_ON_CS);
	lsb = !!(spi_cfg->operation & SPI_TRANSFER_LSB);

	slave_mode = !!(SPI_OP_MODE_GET(spi_cfg->operation));
	frame_size = SPI_WORD_SIZE_GET(spi_cfg->operation);
	cs_active_high = !!(spi_cfg->operation & SPI_CS_ACTIVE_HIGH);

	/* The role is fixed by devicetree (MCR MSTR bit); it cannot be changed here */
	if (slave_mode == (!!(config->spi_hw_cfg->Mcr & SPI_MCR_MSTR_MASK))) {
		LOG_ERR("SPI mode (master/slave) must be same as configured in DT");
		return -ENOTSUP;
	}

	if (slave_mode && !IS_ENABLED(CONFIG_SPI_SLAVE)) {
		LOG_ERR("Kconfig for enable SPI in slave mode is not enabled");
		return -ENOTSUP;
	}

	if (slave_mode && lsb) {
		LOG_ERR("SPI does not support to shifting out with LSB in slave mode");
		return -ENOTSUP;
	}

	if (spi_cfg->slave >= config->num_cs) {
		LOG_ERR("Slave %d excess the allowed maximum value (%d)",
			spi_cfg->slave, config->num_cs - 1);
		return -ENOTSUP;
	}

	if (frame_size > 32U) {
		LOG_ERR("Unsupported frame size %d bits", frame_size);
		return -ENOTSUP;
	}

	if ((spi_cfg->operation & SPI_LINES_MASK) != SPI_LINES_SINGLE) {
		LOG_ERR("Only single line mode is supported");
		return -ENOTSUP;
	}

	if (spi_cfg->operation & SPI_MODE_LOOP) {
		LOG_ERR("Loopback mode is not supported");
		return -ENOTSUP;
	}

	/* Inner CS signals are active-low only; active-high requires a GPIO CS */
	if (cs_active_high && !spi_cs_is_gpio(spi_cfg)) {
		LOG_ERR("For CS has active state is high, a GPIO pin must be used to"
			" control CS line instead");
		return -ENOTSUP;
	}

	if (!slave_mode) {

		if ((spi_cfg->frequency < SPI_NXP_S32_MIN_FREQ) ||
			(spi_cfg->frequency > SPI_NXP_S32_MAX_FREQ)) {

			LOG_ERR("The frequency is out of range");
			return -ENOTSUP;
		}

		/* Master mode only: program the closest baudrate <= requested */
		spi_nxp_s32_getbestfreq(clock_rate, spi_cfg->frequency, &best_baud);

		data->transfer_cfg.Ctar &= ~(SPI_CTAR_BR_MASK | SPI_CTAR_PBR_MASK);
		data->transfer_cfg.Ctar |= SPI_CTAR_BR(best_baud.scaler) |
						SPI_CTAR_PBR(best_baud.prescaler);

		data->transfer_cfg.PushrCmd &= ~((SPI_PUSHR_CONT_MASK | SPI_PUSHR_PCS_MASK) >> 16U);

		if (!spi_cs_is_gpio(spi_cfg)) {
			/* Use inner CS signal from SPI module */
			data->transfer_cfg.PushrCmd |= hold_cs << 15U;
			data->transfer_cfg.PushrCmd |= (1U << spi_cfg->slave);
		}
	}

	data->transfer_cfg.Ctar &= ~(SPI_CTAR_CPHA_MASK | SPI_CTAR_CPOL_MASK);
	data->transfer_cfg.Ctar |= SPI_CTAR_CPHA(clk_phase) | SPI_CTAR_CPOL(clk_polarity);

	Spi_Ip_UpdateFrameSize(&data->transfer_cfg, frame_size);
	Spi_Ip_UpdateLsb(&data->transfer_cfg, lsb);

	/* Remember the applied configuration and derived bytes-per-frame */
	data->ctx.config = spi_cfg;
	data->bytes_per_frame = SPI_NXP_S32_BYTE_PER_FRAME(frame_size);

	if (slave_mode) {
		LOG_DBG("SPI configuration: cpol = %u, cpha = %u,"
			" lsb = %u, frame_size = %u, mode: slave",
			clk_polarity, clk_phase, lsb, frame_size);
	} else {
		LOG_DBG("SPI configuration: frequency = %uHz, cpol = %u,"
			" cpha = %u, lsb = %u, hold_cs = %u, frame_size = %u,"
			" mode: master, CS = %u\n",
			best_baud.frequency, clk_polarity, clk_phase,
			lsb, hold_cs, frame_size, spi_cfg->slave);
	}

	return 0;
}
393
/*
 * Common transfer path shared by the synchronous and asynchronous API
 * entry points.
 *
 * Locks the SPI context, applies the configuration, then transfers the
 * buffers packet by packet: in interrupt mode the first packet is started
 * and completion is awaited (subsequent packets are chained from the ISR
 * callback); in polling mode the packets are looped over here.
 *
 * Returns 0 on success, a negative errno on failure, or (slave, polling
 * mode) the number of received frames.
 */
static int transceive(const struct device *dev,
			const struct spi_config *spi_cfg,
			const struct spi_buf_set *tx_bufs,
			const struct spi_buf_set *rx_bufs,
			bool asynchronous,
			spi_callback_t cb,
			void *userdata)
{
	struct spi_nxp_s32_data *data = dev->data;
	struct spi_context *context = &data->ctx;
	int ret;

	/* Nothing to do */
	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_NXP_S32_SPI_INTERRUPT
	/* Async operation requires the interrupt-driven build */
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	spi_context_lock(context, asynchronous, cb, userdata, spi_cfg);

	ret = spi_nxp_s32_configure(dev, spi_cfg);
	if (ret) {
		LOG_ERR("An error occurred in the SPI configuration");
		spi_context_release(context, ret);
		return ret;
	}

	spi_context_buffers_setup(context, tx_bufs, rx_bufs, 1U);

	/* Both buffer sets may have been empty */
	if (spi_nxp_s32_transfer_done(context)) {
		spi_context_release(context, 0);
		return 0;
	}

	spi_context_cs_control(context, true);

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	ret = spi_nxp_s32_transfer_next_packet(dev);

	if (!ret) {
		/* Remaining packets are chained from the transfer callback */
		ret = spi_context_wait_for_completion(context);
	} else {
		spi_context_cs_control(context, false);
	}
#else
	do {
		ret = spi_nxp_s32_transfer_next_packet(dev);

		if (!ret) {
			spi_context_update_tx(context, 1U, data->transfer_len);
			spi_context_update_rx(context, 1U, data->transfer_len);
		}
	} while (!ret && !spi_nxp_s32_transfer_done(context));

	spi_context_cs_control(context, false);

#ifdef CONFIG_SPI_SLAVE
	if (spi_context_is_slave(context) && !ret) {
		/* Slave API contract: report the number of frames received */
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	spi_context_release(context, ret);

	return ret;
}
465
spi_nxp_s32_transceive(const struct device * dev,const struct spi_config * spi_cfg,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)466 static int spi_nxp_s32_transceive(const struct device *dev,
467 const struct spi_config *spi_cfg,
468 const struct spi_buf_set *tx_bufs,
469 const struct spi_buf_set *rx_bufs)
470 {
471 return transceive(dev, spi_cfg, tx_bufs, rx_bufs, false, NULL, NULL);
472 }
#ifdef CONFIG_SPI_ASYNC
/* Non-blocking transceive entry point of the spi_driver_api. */
static int spi_nxp_s32_transceive_async(const struct device *dev,
					const struct spi_config *spi_cfg,
					const struct spi_buf_set *tx_bufs,
					const struct spi_buf_set *rx_bufs,
					spi_callback_t callback,
					void *userdata)
{
	/* Delegate to the common path: asynchronous, notify via callback */
	return transceive(dev, spi_cfg, tx_bufs, rx_bufs, true, callback, userdata);
}
#endif /* CONFIG_SPI_ASYNC */
484
spi_nxp_s32_release(const struct device * dev,const struct spi_config * spi_cfg)485 static int spi_nxp_s32_release(const struct device *dev,
486 const struct spi_config *spi_cfg)
487 {
488 struct spi_nxp_s32_data *data = dev->data;
489
490 (void)spi_cfg;
491
492 spi_context_unlock_unconditionally(&data->ctx);
493
494 return 0;
495 }
496
/*
 * Device init: enable the module clock, apply pinctrl, initialize the
 * low-level Spi_Ip driver, program the inter-signal delay timings into
 * the cached CTAR value, and set up the SPI context (CS GPIOs, lock).
 *
 * Returns 0 on success, -ENODEV/-EBUSY or a negative errno on failure.
 */
static int spi_nxp_s32_init(const struct device *dev)
{
	const struct spi_nxp_s32_config *config = dev->config;
	struct spi_nxp_s32_data *data = dev->data;
	uint32_t clock_rate;
	uint8_t scaler, prescaler;

	uint32_t ctar = 0;
	int ret = 0;

	if (!device_is_ready(config->clock_dev)) {
		LOG_ERR("Clock control device not ready");
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_subsys);
	if (ret) {
		LOG_ERR("Failed to enable clock");
		return ret;
	}

	ret = clock_control_get_rate(config->clock_dev, config->clock_subsys, &clock_rate);
	if (ret) {
		LOG_ERR("Failed to get clock frequency");
		return ret;
	}

	ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	if (Spi_Ip_Init(config->spi_hw_cfg)) {
		return -EBUSY;
	}

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT
	/* Default HAL mode is polling; switch to interrupt-driven transfers */
	if (Spi_Ip_UpdateTransferMode(config->spi_hw_cfg->Instance, SPI_IP_INTERRUPT)) {
		return -EBUSY;
	}

	config->irq_config_func(dev);
#endif /* CONFIG_NXP_S32_SPI_INTERRUPT */

	/*
	 * Update the delay timings configuration that are
	 * applied for all inner CS signals of SPI module.
	 */

	/* After-SCK-to-CS delay (ASC/PASC fields) */
	spi_nxp_s32_getbestdelay(clock_rate, config->sck_cs_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_ASC(scaler) | SPI_CTAR_PASC(prescaler);

	/* CS-to-SCK delay (CSSCK/PCSSCK fields) */
	spi_nxp_s32_getbestdelay(clock_rate, config->cs_sck_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_CSSCK(scaler) | SPI_CTAR_PCSSCK(prescaler);

	/* Delay between consecutive transfers (DT/PDT fields) */
	spi_nxp_s32_getbestdelay(clock_rate, config->cs_cs_delay, &scaler, &prescaler);

	ctar |= SPI_CTAR_DT(scaler) | SPI_CTAR_PDT(prescaler);

	data->transfer_cfg.Ctar |= ctar;
	data->transfer_cfg.DeviceParams = &data->transfer_params;

	ret = spi_context_cs_configure_all(&data->ctx);
	if (ret < 0) {
		return ret;
	}

	spi_context_unlock_unconditionally(&data->ctx);

	return 0;
}
569
570
571 #ifdef CONFIG_NXP_S32_SPI_INTERRUPT
/* IRQ entry point: forward the interrupt to the low-level driver handler. */
void spi_nxp_s32_isr(const struct device *dev)
{
	const struct spi_nxp_s32_config *config = dev->config;

	Spi_Ip_IrqHandler(config->spi_hw_cfg->Instance);
}
578
/*
 * Low-level driver event callback (interrupt mode).
 *
 * On END_TRANSFER: advance the context by the packet just sent and either
 * complete the transfer (and release CS) or start the next packet. Any
 * other event, or a failure to start the next packet, completes the
 * transfer with -EIO and deasserts CS.
 */
static void spi_nxp_s32_transfer_callback(const struct device *dev, Spi_Ip_EventType event)
{
	struct spi_nxp_s32_data *data = dev->data;
	int ret = 0;

	if (event == SPI_IP_EVENT_END_TRANSFER) {
		spi_context_update_tx(&data->ctx, 1U, data->transfer_len);
		spi_context_update_rx(&data->ctx, 1U, data->transfer_len);

		if (spi_nxp_s32_transfer_done(&data->ctx)) {
			spi_context_complete(&data->ctx, dev, 0);
			spi_context_cs_control(&data->ctx, false);
		} else {
			/* Chain the next packet of the ongoing transfer */
			ret = spi_nxp_s32_transfer_next_packet(dev);
		}
	} else {
		LOG_ERR("Failing in transfer_callback");
		ret = -EIO;
	}

	if (ret) {
		spi_context_complete(&data->ctx, dev, ret);
		spi_context_cs_control(&data->ctx, false);
	}
}
604 #endif /*CONFIG_NXP_S32_SPI_INTERRUPT*/
605
/* SPI driver API vtable: blocking transceive, optional async, release. */
static const struct spi_driver_api spi_nxp_s32_driver_api = {
	.transceive = spi_nxp_s32_transceive,
#ifdef CONFIG_SPI_ASYNC
	.transceive_async = spi_nxp_s32_transceive_async,
#endif
	.release = spi_nxp_s32_release,
};
613
/*
 * Map a DT node to its zero-based hardware instance index by comparing
 * the node's register address against each IP_SPI_<i>_BASE address.
 */
#define SPI_NXP_S32_HW_INSTANCE_CHECK(i, n) \
	((DT_INST_REG_ADDR(n) == IP_SPI_##i##_BASE) ? i : 0)

#define SPI_NXP_S32_HW_INSTANCE(n) \
	LISTIFY(__DEBRACKET SPI_INSTANCE_COUNT, SPI_NXP_S32_HW_INSTANCE_CHECK, (|), n)

#define SPI_NXP_S32_NUM_CS(n)		DT_INST_PROP(n, num_cs)
#define SPI_NXP_S32_IS_MASTER(n)	!DT_INST_PROP(n, slave)

/* Slave-mode field of Spi_Ip_ConfigType only exists with CONFIG_SPI_SLAVE */
#ifdef CONFIG_SPI_SLAVE
#define SPI_NXP_S32_SET_SLAVE(n)	.SlaveMode = DT_INST_PROP(n, slave),
#else
#define SPI_NXP_S32_SET_SLAVE(n)
#endif

#ifdef CONFIG_NXP_S32_SPI_INTERRUPT

#define SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n) \
	.irq_config_func = spi_nxp_s32_config_func_##n,

/* Per-instance IRQ connect/enable helper, referenced from init */
#define SPI_NXP_S32_INTERRUPT_DEFINE(n) \
	static void spi_nxp_s32_config_func_##n(const struct device *dev) \
	{ \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), \
			spi_nxp_s32_isr, DEVICE_DT_INST_GET(n), \
			DT_INST_IRQ(n, flags)); \
		irq_enable(DT_INST_IRQN(n)); \
	}

#define SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n) \
	.cb = spi_nxp_s32_##n##_callback,

/* Per-instance trampoline from the HAL callback signature to this driver */
#define SPI_NXP_S32_CALLBACK_DEFINE(n) \
	static void spi_nxp_s32_##n##_callback(uint8 instance, Spi_Ip_EventType event) \
	{ \
		ARG_UNUSED(instance); \
		const struct device *dev = DEVICE_DT_INST_GET(n); \
		\
		spi_nxp_s32_transfer_callback(dev, event); \
	}
#else
#define SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n)
#define SPI_NXP_S32_INTERRUPT_DEFINE(n)
#define SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n)
#define SPI_NXP_S32_CALLBACK_DEFINE(n)
#endif /*CONFIG_NXP_S32_SPI_INTERRUPT*/

/*
 * Declare the default configuration for SPI driver, no DMA
 * support, all inner module Chip Selects are active low.
 */
#define SPI_NXP_S32_INSTANCE_CONFIG(n) \
	static const Spi_Ip_ConfigType spi_nxp_s32_default_config_##n = { \
		.Instance = SPI_NXP_S32_HW_INSTANCE(n), \
		.Mcr = (SPI_MCR_MSTR(SPI_NXP_S32_IS_MASTER(n)) | \
			SPI_MCR_CONT_SCKE(0U) | SPI_MCR_FRZ(0U) | \
			SPI_MCR_MTFE(0U) | SPI_MCR_SMPL_PT(0U) | \
			SPI_MCR_PCSIS(BIT_MASK(SPI_NXP_S32_NUM_CS(n))) | \
			SPI_MCR_MDIS(0U) | SPI_MCR_XSPI(1U) | SPI_MCR_HALT(1U)), \
		.TransferMode = SPI_IP_POLLING, \
		.StateIndex = n, \
		SPI_NXP_S32_SET_SLAVE(n) \
	}

/* Initializer for the mutable per-transfer configuration in driver data */
#define SPI_NXP_S32_TRANSFER_CONFIG(n) \
	.transfer_cfg = { \
		.Instance = SPI_NXP_S32_HW_INSTANCE(n), \
		.Ctare = SPI_CTARE_FMSZE(0U) | SPI_CTARE_DTCP(1U), \
	}

/* Instantiate config, data, and the Zephyr device for DT instance n */
#define SPI_NXP_S32_DEVICE(n) \
	PINCTRL_DT_INST_DEFINE(n); \
	SPI_NXP_S32_CALLBACK_DEFINE(n) \
	SPI_NXP_S32_INTERRUPT_DEFINE(n) \
	SPI_NXP_S32_INSTANCE_CONFIG(n); \
	static const struct spi_nxp_s32_config spi_nxp_s32_config_##n = { \
		.num_cs = SPI_NXP_S32_NUM_CS(n), \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)), \
		.clock_subsys = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n, name), \
		.sck_cs_delay = DT_INST_PROP_OR(n, spi_sck_cs_delay, 0U), \
		.cs_sck_delay = DT_INST_PROP_OR(n, spi_cs_sck_delay, 0U), \
		.cs_cs_delay = DT_INST_PROP_OR(n, spi_cs_cs_delay, 0U), \
		.spi_hw_cfg = (Spi_Ip_ConfigType *)&spi_nxp_s32_default_config_##n, \
		.pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n), \
		SPI_NXP_S32_CONFIG_CALLBACK_FUNC(n) \
		SPI_NXP_S32_CONFIG_INTERRUPT_FUNC(n) \
	}; \
	static struct spi_nxp_s32_data spi_nxp_s32_data_##n = { \
		SPI_NXP_S32_TRANSFER_CONFIG(n), \
		SPI_CONTEXT_INIT_LOCK(spi_nxp_s32_data_##n, ctx), \
		SPI_CONTEXT_INIT_SYNC(spi_nxp_s32_data_##n, ctx), \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(n), ctx) \
	}; \
	DEVICE_DT_INST_DEFINE(n, \
			&spi_nxp_s32_init, NULL, \
			&spi_nxp_s32_data_##n, &spi_nxp_s32_config_##n, \
			POST_KERNEL, CONFIG_SPI_INIT_PRIORITY, \
			&spi_nxp_s32_driver_api);

DT_INST_FOREACH_STATUS_OKAY(SPI_NXP_S32_DEVICE)
714