1 /*
2 * Copyright (c) 2024 Renesas Electronics Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT renesas_ra8_spi_b
8
9 #include <zephyr/drivers/spi.h>
10 #include <zephyr/drivers/pinctrl.h>
11 #include <zephyr/drivers/clock_control/renesas_ra_cgc.h>
12 #include <zephyr/irq.h>
13 #include <soc.h>
14 #include <instances/r_dtc.h>
15 #include <instances/r_spi_b.h>
16
17 #include <zephyr/logging/log.h>
18 LOG_MODULE_REGISTER(ra8_spi_b);
19
20 #include "spi_context.h"
21
22 #if defined(CONFIG_SPI_B_INTERRUPT)
23 void spi_b_rxi_isr(void);
24 void spi_b_txi_isr(void);
25 void spi_b_tei_isr(void);
26 void spi_b_eri_isr(void);
27 #endif
28
/* Constant (ROM) per-instance configuration, resolved from devicetree. */
struct ra_spi_config {
	const struct pinctrl_dev_config *pcfg;  /* pin mux configuration */
	const struct device *clock_dev;         /* clock controller device */
	const struct clock_control_ra_subsys_cfg clock_subsys; /* module-stop (MSTP) clock info */
};
34
35 struct ra_spi_data {
36 struct spi_context ctx;
37 uint8_t dfs;
38 struct st_spi_b_instance_ctrl spi;
39 struct st_spi_cfg fsp_config;
40 struct st_spi_b_extended_cfg fsp_config_extend;
41 #if CONFIG_SPI_B_INTERRUPT
42 uint32_t data_len;
43 #endif
44 #if defined(CONFIG_SPI_B_RA_DTC)
45 /* RX */
46 struct st_transfer_instance rx_transfer;
47 struct st_dtc_instance_ctrl rx_transfer_ctrl;
48 struct st_transfer_info rx_transfer_info;
49 struct st_transfer_cfg rx_transfer_cfg;
50 struct st_dtc_extended_cfg rx_transfer_cfg_extend;
51
52 /* TX */
53 struct st_transfer_instance tx_transfer;
54 struct st_dtc_instance_ctrl tx_transfer_ctrl;
55 struct st_transfer_info tx_transfer_info;
56 struct st_transfer_cfg tx_transfer_cfg;
57 struct st_dtc_extended_cfg tx_transfer_cfg_extend;
58 #endif
59 };
60
/*
 * FSP SPI event callback, invoked from the FSP ISR layer with the
 * Zephyr device stashed in p_context. Completion and every bus-error
 * event end the pending spi_context transaction (0 or -EIO); all other
 * events are ignored.
 */
static void spi_cb(spi_callback_args_t *p_args)
{
	struct device *dev = (struct device *)p_args->p_context;
	struct ra_spi_data *data = dev->data;
	int status;

	if (p_args->event == SPI_EVENT_TRANSFER_COMPLETE) {
		status = 0;
	} else if (p_args->event == SPI_EVENT_ERR_MODE_FAULT ||
		   p_args->event == SPI_EVENT_ERR_READ_OVERFLOW ||
		   p_args->event == SPI_EVENT_ERR_PARITY ||
		   p_args->event == SPI_EVENT_ERR_OVERRUN ||
		   p_args->event == SPI_EVENT_ERR_FRAMING ||
		   p_args->event == SPI_EVENT_ERR_MODE_UNDERRUN) {
		status = -EIO;
	} else {
		return;
	}

	/* Release chip-select, then wake the waiting transfer. */
	spi_context_cs_control(&data->ctx, false);
	spi_context_complete(&data->ctx, dev, status);
}
84
ra_spi_b_configure(const struct device * dev,const struct spi_config * config)85 static int ra_spi_b_configure(const struct device *dev, const struct spi_config *config)
86 {
87 struct ra_spi_data *data = dev->data;
88 fsp_err_t fsp_err;
89
90 if (spi_context_configured(&data->ctx, config)) {
91 /* Nothing to do */
92 return 0;
93 }
94
95 if (data->spi.open != 0) {
96 R_SPI_B_Close(&data->spi);
97 }
98
99 if ((config->operation & SPI_FRAME_FORMAT_TI) == SPI_FRAME_FORMAT_TI) {
100 return -ENOTSUP;
101 }
102
103 if (config->operation & SPI_OP_MODE_SLAVE) {
104 data->fsp_config.operating_mode = SPI_MODE_SLAVE;
105 } else {
106 data->fsp_config.operating_mode = SPI_MODE_MASTER;
107 }
108
109 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
110 data->fsp_config.clk_polarity = SPI_CLK_POLARITY_HIGH;
111 } else {
112 data->fsp_config.clk_polarity = SPI_CLK_POLARITY_LOW;
113 }
114
115 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
116 data->fsp_config.clk_phase = SPI_CLK_PHASE_EDGE_EVEN;
117 } else {
118 data->fsp_config.clk_phase = SPI_CLK_PHASE_EDGE_ODD;
119 }
120
121 if (config->operation & SPI_TRANSFER_LSB) {
122 data->fsp_config.bit_order = SPI_BIT_ORDER_LSB_FIRST;
123 } else {
124 data->fsp_config.bit_order = SPI_BIT_ORDER_MSB_FIRST;
125 }
126
127 if (config->frequency > 0) {
128 fsp_err = R_SPI_B_CalculateBitrate(config->frequency,
129 data->fsp_config_extend.clock_source,
130 &data->fsp_config_extend.spck_div);
131 __ASSERT(fsp_err == 0, "spi_b: spi frequency calculate error: %d", fsp_err);
132 }
133
134 data->fsp_config_extend.spi_comm = SPI_B_COMMUNICATION_FULL_DUPLEX;
135 if (spi_cs_is_gpio(config) || !IS_ENABLED(CONFIG_SPI_B_USE_HW_SS)) {
136 data->fsp_config_extend.spi_clksyn = SPI_B_SSL_MODE_CLK_SYN;
137 } else {
138 data->fsp_config_extend.spi_clksyn = SPI_B_SSL_MODE_SPI;
139 data->fsp_config_extend.ssl_select = SPI_B_SSL_SELECT_SSL0;
140 }
141
142 data->fsp_config.p_extend = &data->fsp_config_extend;
143
144 data->fsp_config.p_callback = spi_cb;
145 data->fsp_config.p_context = dev;
146 fsp_err = R_SPI_B_Open(&data->spi, &data->fsp_config);
147 if (fsp_err != FSP_SUCCESS) {
148 LOG_ERR("R_SPI_B_Open error: %d", fsp_err);
149 return -EINVAL;
150 }
151 data->ctx.config = config;
152
153 return 0;
154 }
155
ra_spi_b_transfer_ongoing(struct ra_spi_data * data)156 static bool ra_spi_b_transfer_ongoing(struct ra_spi_data *data)
157 {
158 #if defined(CONFIG_SPI_B_INTERRUPT)
159 return (spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx));
160 #else
161 if (spi_context_total_tx_len(&data->ctx) < spi_context_total_rx_len(&data->ctx)) {
162 return (spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx));
163 } else {
164 return (spi_context_tx_on(&data->ctx) && spi_context_rx_on(&data->ctx));
165 }
166 #endif
167 }
168
169 #ifndef CONFIG_SPI_B_INTERRUPT
/*
 * Polled-mode slave transfer step: move at most one frame in each
 * direction, gated by the SPTEF (transmit empty) / SPRF (receive full)
 * status flags. Always returns 0.
 *
 * NOTE(review): TX loads are plain dereferences while RX stores use
 * UNALIGNED_PUT — assumes tx_buf is suitably aligned for dfs; confirm.
 */
static int ra_spi_b_transceive_slave(struct ra_spi_data *data)
{
	R_SPI_B0_Type *p_spi_reg = data->spi.p_regs;

	/* Load the next TX frame when the transmit buffer is empty. */
	if (p_spi_reg->SPSR_b.SPTEF && spi_context_tx_on(&data->ctx)) {
		uint32_t tx;

		if (data->ctx.tx_buf != NULL) {
			/* dfs is the frame size in bytes (1, 2 or 4). */
			if (data->dfs > 2) {
				tx = *(uint32_t *)(data->ctx.tx_buf);
			} else if (data->dfs > 1) {
				tx = *(uint16_t *)(data->ctx.tx_buf);
			} else {
				tx = *(uint8_t *)(data->ctx.tx_buf);
			}
		} else {
			/* NULL buffer in the set: clock out dummy zeros. */
			tx = 0;
		}
		/* Clear Transmit Empty flag */
		p_spi_reg->SPSRC = R_SPI_B0_SPSRC_SPTEFC_Msk;

		p_spi_reg->SPDR = tx;

		spi_context_update_tx(&data->ctx, data->dfs, 1);
	} else {
		/* Nothing left to send: mask the transmit-empty interrupt. */
		p_spi_reg->SPCR_b.SPTIE = 0;
	}

	/* Drain one received frame when the receive buffer is full. */
	if (p_spi_reg->SPSR_b.SPRF && spi_context_rx_buf_on(&data->ctx)) {
		uint32_t rx;

		rx = p_spi_reg->SPDR;
		/* Clear Receive Full flag */
		p_spi_reg->SPSRC = R_SPI_B0_SPSRC_SPRFC_Msk;
		if (data->dfs > 2) {
			UNALIGNED_PUT(rx, (uint32_t *)data->ctx.rx_buf);
		} else if (data->dfs > 1) {
			UNALIGNED_PUT(rx, (uint16_t *)data->ctx.rx_buf);
		} else {
			UNALIGNED_PUT(rx, (uint8_t *)data->ctx.rx_buf);
		}
		spi_context_update_rx(&data->ctx, data->dfs, 1);
	}

	return 0;
}
216
/*
 * Polled-mode master transfer step: push exactly one TX frame (zeros
 * when there is no TX buffer) and, if RX is still pending, busy-wait
 * for and read back one RX frame. Always returns 0.
 */
static int ra_spi_b_transceive_master(struct ra_spi_data *data)
{
	R_SPI_B0_Type *p_spi_reg = data->spi.p_regs;
	uint32_t tx;
	uint32_t rx;

	/* Tx transfer*/
	if (spi_context_tx_buf_on(&data->ctx)) {
		/* dfs is the frame size in bytes (1, 2 or 4). */
		if (data->dfs > 2) {
			tx = *(uint32_t *)(data->ctx.tx_buf);
		} else if (data->dfs > 1) {
			tx = *(uint16_t *)(data->ctx.tx_buf);
		} else {
			tx = *(uint8_t *)(data->ctx.tx_buf);
		}
	} else {
		tx = 0U;
	}

	/* Busy-wait until the transmit buffer is empty. */
	while (!p_spi_reg->SPSR_b.SPTEF) {
	}

	p_spi_reg->SPDR = tx;

	/* Clear Transmit Empty flag */
	p_spi_reg->SPSRC = R_SPI_B0_SPSRC_SPTEFC_Msk;
	spi_context_update_tx(&data->ctx, data->dfs, 1);

	/* Rx receive */
	if (spi_context_rx_on(&data->ctx)) {
		/* Busy-wait for the frame clocked in by the TX above. */
		while (!p_spi_reg->SPSR_b.SPRF) {
		}
		rx = p_spi_reg->SPDR;
		/* Clear Receive Full flag */
		p_spi_reg->SPSRC = R_SPI_B0_SPSRC_SPRFC_Msk;
		if (data->dfs > 2) {
			UNALIGNED_PUT(rx, (uint32_t *)data->ctx.rx_buf);
		} else if (data->dfs > 1) {
			UNALIGNED_PUT(rx, (uint16_t *)data->ctx.rx_buf);
		} else {
			UNALIGNED_PUT(rx, (uint8_t *)data->ctx.rx_buf);
		}
		spi_context_update_rx(&data->ctx, data->dfs, 1);
	}

	return 0;
}
264
ra_spi_b_transceive_data(struct ra_spi_data * data)265 static int ra_spi_b_transceive_data(struct ra_spi_data *data)
266 {
267 uint16_t operation = data->ctx.config->operation;
268
269 if (SPI_OP_MODE_GET(operation) == SPI_OP_MODE_MASTER) {
270 ra_spi_b_transceive_master(data);
271 } else {
272 ra_spi_b_transceive_slave(data);
273 }
274
275 return 0;
276 }
277 #endif
278
/*
 * Common transfer path for the sync and async entry points.
 *
 * Locks the context, (re)configures the controller when needed, then
 * either hands the transfer to the FSP driver (interrupt mode) or
 * shuffles frames by polling the status flags (polled mode).
 *
 * Returns 0 on success, a negative errno on failure, or — in slave
 * mode — the number of received frames.
 */
static int transceive(const struct device *dev, const struct spi_config *config,
		      const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
		      bool asynchronous, spi_callback_t cb, void *userdata)
{
	struct ra_spi_data *data = dev->data;
	int ret = 0;

	if (!tx_bufs && !rx_bufs) {
		return 0;
	}

#ifndef CONFIG_SPI_B_INTERRUPT
	/* Async completion needs the interrupt-driven path. */
	if (asynchronous) {
		return -ENOTSUP;
	}
#endif

	spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);

	ret = ra_spi_b_configure(dev, config);
	if (ret) {
		goto end;
	}
	/* Frame size in bytes, derived from the configured word size. */
	data->dfs = ((SPI_WORD_SIZE_GET(config->operation) - 1) / 8) + 1;
	/* Set buffers info */
	spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs);

	spi_context_cs_control(&data->ctx, true);

#ifdef CONFIG_SPI_B_INTERRUPT
	fsp_err_t fsp_err;
	spi_bit_width_t spi_width =
		(spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);

	/*
	 * Frames handed to the FSP driver in one call: slaves transfer
	 * everything at once, masters go chunk by chunk (the TEI ISR
	 * restarts the next chunk via ra_spi_retransmit()).
	 */
	if (data->ctx.rx_len == 0) {
		data->data_len = spi_context_is_slave(&data->ctx)
					 ? spi_context_total_tx_len(&data->ctx)
					 : data->ctx.tx_len;
	} else if (data->ctx.tx_len == 0) {
		data->data_len = spi_context_is_slave(&data->ctx)
					 ? spi_context_total_rx_len(&data->ctx)
					 : data->ctx.rx_len;
	} else {
		data->data_len = spi_context_is_slave(&data->ctx)
					 ? MAX(spi_context_total_tx_len(&data->ctx),
					       spi_context_total_rx_len(&data->ctx))
					 : MIN(data->ctx.tx_len, data->ctx.rx_len);
	}

	/*
	 * Check the FSP return codes: previously a failed start left the
	 * caller blocked in spi_context_wait_for_completion() forever.
	 */
	if (data->ctx.rx_buf == NULL) {
		fsp_err = R_SPI_B_Write(&data->spi, data->ctx.tx_buf, data->data_len, spi_width);
	} else if (data->ctx.tx_buf == NULL) {
		fsp_err = R_SPI_B_Read(&data->spi, data->ctx.rx_buf, data->data_len, spi_width);
	} else {
		fsp_err = R_SPI_B_WriteRead(&data->spi, data->ctx.tx_buf, data->ctx.rx_buf,
					    data->data_len, spi_width);
	}
	if (fsp_err != FSP_SUCCESS) {
		LOG_ERR("spi_b: transfer start failed: %d", fsp_err);
		/* The transfer never started: release CS and bail out. */
		spi_context_cs_control(&data->ctx, false);
		ret = -EIO;
		goto end;
	}
	ret = spi_context_wait_for_completion(&data->ctx);
#else
	/* Register pointer is only needed by the polled path. */
	R_SPI_B0_Type *p_spi_reg = data->spi.p_regs;

	p_spi_reg->SPCR_b.TXMD = 0x0; /* tx - rx*/
	if (!spi_context_tx_on(&data->ctx)) {
		p_spi_reg->SPCR_b.TXMD = 0x2; /* rx only */
	}
	if (!spi_context_rx_on(&data->ctx)) {
		p_spi_reg->SPCR_b.TXMD = 0x1; /* tx only */
	}

	/* Clear FIFOs */
	p_spi_reg->SPFCR = 1;

	/* Enable the SPI Transfer. */
	p_spi_reg->SPCR_b.SPE = 1;
	/* Program the frame bit length into command register 0. */
	p_spi_reg->SPCMD0 |= (uint32_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1)
			     << R_SPI_B0_SPCMD0_SPB_Pos;
	do {
		ra_spi_b_transceive_data(data);
	} while (ra_spi_b_transfer_ongoing(data));

	/* Wait for transmission complete */
	while (p_spi_reg->SPSR_b.IDLNF) {
	}

	/* Disable the SPI Transfer. */
	p_spi_reg->SPCR_b.SPE = 0;
#endif
#ifdef CONFIG_SPI_SLAVE
	/* Slave convention: report the number of frames received. */
	if (spi_context_is_slave(&data->ctx) && !ret) {
		ret = data->ctx.recv_frames;
	}
#endif /* CONFIG_SPI_SLAVE */

end:
	spi_context_release(&data->ctx, ret);

	return ret;
}
377
/* Synchronous (blocking) transceive entry point of the SPI driver API. */
static int ra_spi_b_transceive(const struct device *dev, const struct spi_config *config,
			       const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
{
	return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
}
383
384 #ifdef CONFIG_SPI_ASYNC
/* Asynchronous transceive entry point: cb/userdata fire on completion. */
static int ra_spi_b_transceive_async(const struct device *dev, const struct spi_config *config,
				     const struct spi_buf_set *tx_bufs,
				     const struct spi_buf_set *rx_bufs, spi_callback_t cb,
				     void *userdata)
{
	return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
}
392 #endif /* CONFIG_SPI_ASYNC */
393
ra_spi_b_release(const struct device * dev,const struct spi_config * config)394 static int ra_spi_b_release(const struct device *dev, const struct spi_config *config)
395 {
396 struct ra_spi_data *data = dev->data;
397
398 spi_context_unlock_unconditionally(&data->ctx);
399
400 return 0;
401 }
402
403 static DEVICE_API(spi, ra_spi_driver_api) = {.transceive = ra_spi_b_transceive,
404 #ifdef CONFIG_SPI_ASYNC
405 .transceive_async =
406 ra_spi_b_transceive_async,
407 #endif /* CONFIG_SPI_ASYNC */
408 .release = ra_spi_b_release};
409
ra_spi_b_clock_name(const struct device * clock_dev)410 static spi_b_clock_source_t ra_spi_b_clock_name(const struct device *clock_dev)
411 {
412 const char *clock_dev_name = clock_dev->name;
413
414 if (strcmp(clock_dev_name, "spiclk") == 0 || strcmp(clock_dev_name, "scispiclk") == 0) {
415 return SPI_B_CLOCK_SOURCE_SCISPICLK;
416 }
417
418 return SPI_B_CLOCK_SOURCE_PCLK;
419 }
420
spi_b_ra_init(const struct device * dev)421 static int spi_b_ra_init(const struct device *dev)
422 {
423 const struct ra_spi_config *config = dev->config;
424 struct ra_spi_data *data = dev->data;
425 int ret;
426
427 if (!device_is_ready(config->clock_dev)) {
428 return -ENODEV;
429 }
430
431 data->fsp_config_extend.clock_source = ra_spi_b_clock_name(config->clock_dev);
432
433 /* Configure dt provided device signals when available */
434 ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
435 if (ret < 0) {
436 return ret;
437 }
438
439 ret = spi_context_cs_configure_all(&data->ctx);
440 if (ret < 0) {
441 return ret;
442 }
443
444 spi_context_unlock_unconditionally(&data->ctx);
445
446 return 0;
447 }
448
449 #if defined(CONFIG_SPI_B_INTERRUPT)
450
/*
 * Prime the FSP instance for the next chunk of an ongoing transfer
 * without going through the FSP API again: patch the control block's
 * buffer pointers/counters directly and, when DTC is enabled,
 * reconfigure the transfer descriptors.
 *
 * Called from the TEI ISR while the peripheral is still enabled.
 */
static void ra_spi_retransmit(struct ra_spi_data *data)
{
	spi_bit_width_t spi_width =
		(spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);

	if (data->ctx.rx_len == 0) {
		/* TX-only chunk. */
		data->data_len = data->ctx.tx_len;
		data->spi.p_tx_data = data->ctx.tx_buf;
		data->spi.p_rx_data = NULL;
	} else if (data->ctx.tx_len == 0) {
		/* RX-only chunk. */
		data->data_len = data->ctx.rx_len;
		data->spi.p_tx_data = NULL;
		data->spi.p_rx_data = data->ctx.rx_buf;
	} else {
		/* Full duplex: bounded by the shorter current buffer. */
		data->data_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
		data->spi.p_tx_data = data->ctx.tx_buf;
		data->spi.p_rx_data = data->ctx.rx_buf;
	}

	data->spi.bit_width = spi_width;
	data->spi.rx_count = 0;
	data->spi.tx_count = 0;
	data->spi.count = data->data_len;
#ifdef CONFIG_SPI_B_RA_DTC
	/* Determine DTC transfer size */
	transfer_size_t size;

	if (spi_width > SPI_BIT_WIDTH_16_BITS) { /* Bit Widths of 17-32 bits */
		size = TRANSFER_SIZE_4_BYTE;
	} else if (spi_width > SPI_BIT_WIDTH_8_BITS) { /* Bit Widths of 9-16 bits*/
		size = TRANSFER_SIZE_2_BYTE;
	} else { /* Bit Widths of 4-8 bits */
		size = TRANSFER_SIZE_1_BYTE;
	}

	if (data->spi.p_cfg->p_transfer_rx) {
		/* When the rxi interrupt is called, all transfers will be finished. */
		data->spi.rx_count = data->data_len;

		transfer_instance_t *p_transfer_rx =
			(transfer_instance_t *)data->spi.p_cfg->p_transfer_rx;
		transfer_info_t *p_info = p_transfer_rx->p_cfg->p_info;

		/* Configure the receive DMA instance. */
		p_info->transfer_settings_word_b.size = size;
		p_info->length = (uint16_t)data->data_len;
		p_info->transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED;
		p_info->p_dest = data->ctx.rx_buf;

		if (NULL == data->ctx.rx_buf) {
			/* No RX buffer: sink every frame into one dummy word. */
			static uint32_t dummy_rx;

			p_info->transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED;
			p_info->p_dest = &dummy_rx;
		}

		p_transfer_rx->p_api->reconfigure(p_transfer_rx->p_ctrl, p_info);
	}

	if (data->spi.p_cfg->p_transfer_tx) {
		/* When the txi interrupt is called, all transfers will be finished. */
		data->spi.tx_count = data->data_len;

		transfer_instance_t *p_transfer_tx =
			(transfer_instance_t *)data->spi.p_cfg->p_transfer_tx;
		transfer_info_t *p_info = p_transfer_tx->p_cfg->p_info;

		/* Configure the transmit DMA instance. */
		p_info->transfer_settings_word_b.size = size;
		p_info->length = (uint16_t)data->data_len;
		p_info->transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED;
		p_info->p_src = data->ctx.tx_buf;

		if (NULL == data->ctx.tx_buf) {
			/* No TX buffer: clock out a fixed dummy word. */
			static uint32_t dummy_tx;

			p_info->transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED;
			p_info->p_src = &dummy_tx;
		}

		p_transfer_tx->p_api->reconfigure(p_transfer_tx->p_ctrl, p_info);
	}
#endif
	/* Clear any stale transmit-empty flag before the next chunk runs. */
	data->spi.p_regs->SPSRC = R_SPI_B0_SPSRC_SPTEFC_Msk;
}
536
/*
 * RXI (receive buffer full) interrupt handler.
 *
 * Master-only builds forward straight to the FSP handler. With slave
 * support, after the FSP handler runs, detect end-of-transfer (all
 * expected frames received) and complete the transaction here, since
 * a slave cannot rely on the TEI interrupt.
 */
static void ra_spi_rxi_isr(const struct device *dev)
{
#ifndef CONFIG_SPI_SLAVE
	ARG_UNUSED(dev);
	spi_b_rxi_isr();
#else
	struct ra_spi_data *data = dev->data;

	spi_b_rxi_isr();
	if (spi_context_is_slave(&data->ctx) && data->spi.rx_count == data->spi.count) {

		/* Report how many frames were actually exchanged. */
		if (data->ctx.rx_buf != NULL && data->ctx.tx_buf != NULL) {
			data->ctx.recv_frames = MIN(spi_context_total_tx_len(&data->ctx),
						    spi_context_total_rx_len(&data->ctx));
		} else if (data->ctx.tx_buf == NULL) {
			data->ctx.recv_frames = data->data_len;
		} else {
			/* Do nothing */
		}

		R_BSP_IrqDisable(data->fsp_config.tei_irq);

		/* Writing 0 to SPE generates a TXI IRQ. Disable the TXI IRQ.
		 * (See Section 38.2.1 SPI Control Register in the RA6T2 manual R01UH0886EJ0100).
		 */
		R_BSP_IrqDisable(data->fsp_config.txi_irq);

		/* Disable the SPI Transfer. */
		data->spi.p_regs->SPCR_b.SPE = 0;

		/* Re-enable the TXI IRQ and clear the pending IRQ. */
		R_BSP_IrqEnable(data->fsp_config.txi_irq);

		spi_context_cs_control(&data->ctx, false);
		spi_context_complete(&data->ctx, dev, 0);
	}
#endif
}
575
/* TXI (transmit buffer empty) interrupt: delegate to the FSP handler. */
static void ra_spi_txi_isr(const struct device *dev)
{
	ARG_UNUSED(dev);
	spi_b_txi_isr();
}
581
/*
 * TEI (transfer end) interrupt handler.
 *
 * Advances the spi_context past the chunk that just finished, then
 * either kicks off the next chunk in place (ra_spi_retransmit) or
 * hands the final completion to the FSP handler.
 */
static void ra_spi_tei_isr(const struct device *dev)
{
	struct ra_spi_data *data = dev->data;

	if (data->spi.rx_count == data->spi.count) {
		spi_context_update_rx(&data->ctx, 1, data->data_len);
	}
	if (data->spi.tx_count == data->spi.count) {
		spi_context_update_tx(&data->ctx, 1, data->data_len);
	}
	if (ra_spi_b_transfer_ongoing(data)) {
		/* Clear the pending TEI event before restarting. */
		R_ICU->IELSR_b[data->fsp_config.tei_irq].IR = 0U;
		ra_spi_retransmit(data);
	} else {
		spi_b_tei_isr();
	}
}
599
/* ERI (error) interrupt: delegate to the FSP handler. */
static void ra_spi_eri_isr(const struct device *dev)
{
	ARG_UNUSED(dev);
	spi_b_eri_isr();
}
605 #endif
606
/*
 * Map an SPI channel number to its ELC event codes, used to route the
 * peripheral events to ICU interrupt lines. The two-level macros force
 * expansion of the channel argument before token pasting.
 */
#define _ELC_EVENT_SPI_RXI(channel) ELC_EVENT_SPI##channel##_RXI
#define _ELC_EVENT_SPI_TXI(channel) ELC_EVENT_SPI##channel##_TXI
#define _ELC_EVENT_SPI_TEI(channel) ELC_EVENT_SPI##channel##_TEI
#define _ELC_EVENT_SPI_ERI(channel) ELC_EVENT_SPI##channel##_ERI

#define ELC_EVENT_SPI_RXI(channel) _ELC_EVENT_SPI_RXI(channel)
#define ELC_EVENT_SPI_TXI(channel) _ELC_EVENT_SPI_TXI(channel)
#define ELC_EVENT_SPI_TEI(channel) _ELC_EVENT_SPI_TEI(channel)
#define ELC_EVENT_SPI_ERI(channel) _ELC_EVENT_SPI_ERI(channel)
616
#if defined(CONFIG_SPI_B_INTERRUPT)

/*
 * Route this instance's ELC events into its ICU slots, connect the
 * four SPI ISRs and unmask them.
 *
 * NOTE(review): the TEI IRQ is connected but not irq_enable()d here —
 * presumably enabled later by the FSP driver when a transfer starts;
 * confirm against R_SPI_B behavior.
 */
#define RA_SPI_B_IRQ_CONFIG_INIT(index)                                                            \
	do {                                                                                       \
		ARG_UNUSED(dev);                                                                   \
                                                                                                   \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, rxi, irq)] =                               \
			ELC_EVENT_SPI_RXI(DT_INST_PROP(index, channel));                           \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, txi, irq)] =                               \
			ELC_EVENT_SPI_TXI(DT_INST_PROP(index, channel));                           \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, tei, irq)] =                               \
			ELC_EVENT_SPI_TEI(DT_INST_PROP(index, channel));                           \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, eri, irq)] =                               \
			ELC_EVENT_SPI_ERI(DT_INST_PROP(index, channel));                           \
                                                                                                   \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, rxi, irq),                                  \
			    DT_INST_IRQ_BY_NAME(index, rxi, priority), ra_spi_rxi_isr,             \
			    DEVICE_DT_INST_GET(index), 0);                                         \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, txi, irq),                                  \
			    DT_INST_IRQ_BY_NAME(index, txi, priority), ra_spi_txi_isr,             \
			    DEVICE_DT_INST_GET(index), 0);                                         \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, tei, irq),                                  \
			    DT_INST_IRQ_BY_NAME(index, tei, priority), ra_spi_tei_isr,             \
			    DEVICE_DT_INST_GET(index), 0);                                         \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, eri, irq),                                  \
			    DT_INST_IRQ_BY_NAME(index, eri, priority), ra_spi_eri_isr,             \
			    DEVICE_DT_INST_GET(index), 0);                                         \
                                                                                                   \
		irq_enable(DT_INST_IRQ_BY_NAME(index, rxi, irq));                                  \
		irq_enable(DT_INST_IRQ_BY_NAME(index, txi, irq));                                  \
		irq_enable(DT_INST_IRQ_BY_NAME(index, eri, irq));                                  \
	} while (0)

#else

/* Polled mode: no interrupt plumbing required. */
#define RA_SPI_B_IRQ_CONFIG_INIT(index)

#endif
655
#ifndef CONFIG_SPI_B_RA_DTC
/* Without DTC support the hooks expand to nothing. */
#define RA_SPI_B_DTC_STRUCT_INIT(index)
#define RA_SPI_B_DTC_INIT(index)
#else
/*
 * Attach the DTC transfer instances to the FSP configuration at init
 * time, honouring the per-direction rx-dtc / tx-dtc devicetree flags.
 */
#define RA_SPI_B_DTC_INIT(index)                                                                   \
	do {                                                                                       \
		if (DT_INST_PROP_OR(index, rx_dtc, false)) {                                       \
			ra_spi_data_##index.fsp_config.p_transfer_rx =                             \
				&ra_spi_data_##index.rx_transfer;                                  \
		}                                                                                  \
		if (DT_INST_PROP_OR(index, tx_dtc, false)) {                                       \
			ra_spi_data_##index.fsp_config.p_transfer_tx =                             \
				&ra_spi_data_##index.tx_transfer;                                  \
		}                                                                                  \
	} while (0)
/*
 * Static initializers for the RX/TX DTC descriptors inside
 * ra_spi_data_<index>: info word, activation source (the instance's
 * RXI/TXI IRQ) and the transfer-instance glue. Buffer addresses and
 * lengths are filled in at run time (ra_spi_retransmit / FSP).
 */
#define RA_SPI_B_DTC_STRUCT_INIT(index)                                                            \
	.rx_transfer_info =                                                                        \
		{                                                                                  \
			.transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \
			.transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_DESTINATION,  \
			.transfer_settings_word_b.irq = TRANSFER_IRQ_END,                          \
			.transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED,       \
			.transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED,        \
			.transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE,                     \
			.transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL,                     \
			.p_dest = (void *)NULL,                                                    \
			.p_src = (void const *)NULL,                                               \
			.num_blocks = 0,                                                           \
			.length = 0,                                                               \
		},                                                                                 \
	.rx_transfer_cfg_extend = {.activation_source = DT_INST_IRQ_BY_NAME(index, rxi, irq)},     \
	.rx_transfer_cfg =                                                                         \
		{                                                                                  \
			.p_info = &ra_spi_data_##index.rx_transfer_info,                           \
			.p_extend = &ra_spi_data_##index.rx_transfer_cfg_extend,                   \
		},                                                                                 \
	.rx_transfer =                                                                             \
		{                                                                                  \
			.p_ctrl = &ra_spi_data_##index.rx_transfer_ctrl,                           \
			.p_cfg = &ra_spi_data_##index.rx_transfer_cfg,                             \
			.p_api = &g_transfer_on_dtc,                                               \
		},                                                                                 \
	.tx_transfer_info =                                                                        \
		{                                                                                  \
			.transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED,       \
			.transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_SOURCE,       \
			.transfer_settings_word_b.irq = TRANSFER_IRQ_END,                          \
			.transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED,       \
			.transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED,  \
			.transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE,                     \
			.transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL,                     \
			.p_dest = (void *)NULL,                                                    \
			.p_src = (void const *)NULL,                                               \
			.num_blocks = 0,                                                           \
			.length = 0,                                                               \
		},                                                                                 \
	.tx_transfer_cfg_extend = {.activation_source = DT_INST_IRQ_BY_NAME(index, txi, irq)},     \
	.tx_transfer_cfg =                                                                         \
		{                                                                                  \
			.p_info = &ra_spi_data_##index.tx_transfer_info,                           \
			.p_extend = &ra_spi_data_##index.tx_transfer_cfg_extend,                   \
		},                                                                                 \
	.tx_transfer = {                                                                           \
		.p_ctrl = &ra_spi_data_##index.tx_transfer_ctrl,                                   \
		.p_cfg = &ra_spi_data_##index.tx_transfer_cfg,                                     \
		.p_api = &g_transfer_on_dtc,                                                       \
	},
#endif
724
/*
 * Per-instance expansion: pinctrl, ROM config (clock subsystem), RAM
 * data (spi_context init, FSP IRQ numbers/priorities, optional DTC
 * descriptors), a wrapper init that wires DTC and IRQs around the
 * common spi_b_ra_init(), and the device definition itself.
 */
#define RA_SPI_INIT(index)                                                                         \
                                                                                                   \
	PINCTRL_DT_INST_DEFINE(index);                                                             \
                                                                                                   \
	static const struct ra_spi_config ra_spi_config_##index = {                                \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),                                     \
		.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(index)),                            \
		.clock_subsys =                                                                    \
			{                                                                          \
				.mstp = (uint32_t)DT_INST_CLOCKS_CELL_BY_NAME(index, spiclk,       \
									      mstp),               \
				.stop_bit = DT_INST_CLOCKS_CELL_BY_NAME(index, spiclk, stop_bit),  \
			},                                                                         \
	};                                                                                         \
                                                                                                   \
	static struct ra_spi_data ra_spi_data_##index = {                                          \
		SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(index), ctx)                           \
		SPI_CONTEXT_INIT_LOCK(ra_spi_data_##index, ctx),                                   \
		SPI_CONTEXT_INIT_SYNC(ra_spi_data_##index, ctx),                                   \
		.fsp_config =                                                                      \
			{                                                                          \
				.channel = DT_INST_PROP(index, channel),                           \
				.rxi_ipl = DT_INST_IRQ_BY_NAME(index, rxi, priority),              \
				.rxi_irq = DT_INST_IRQ_BY_NAME(index, rxi, irq),                   \
				.txi_ipl = DT_INST_IRQ_BY_NAME(index, txi, priority),              \
				.txi_irq = DT_INST_IRQ_BY_NAME(index, txi, irq),                   \
				.tei_ipl = DT_INST_IRQ_BY_NAME(index, tei, priority),              \
				.tei_irq = DT_INST_IRQ_BY_NAME(index, tei, irq),                   \
				.eri_ipl = DT_INST_IRQ_BY_NAME(index, eri, priority),              \
				.eri_irq = DT_INST_IRQ_BY_NAME(index, eri, irq),                   \
			},                                                                         \
		RA_SPI_B_DTC_STRUCT_INIT(index)};                                                  \
                                                                                                   \
	static int spi_b_ra_init##index(const struct device *dev)                                  \
	{                                                                                          \
		RA_SPI_B_DTC_INIT(index);                                                          \
		int err = spi_b_ra_init(dev);                                                      \
		if (err != 0) {                                                                    \
			return err;                                                               \
		}                                                                                  \
		RA_SPI_B_IRQ_CONFIG_INIT(index);                                                   \
		return 0;                                                                          \
	}                                                                                          \
                                                                                                   \
	SPI_DEVICE_DT_INST_DEFINE(index, spi_b_ra_init##index, PM_DEVICE_DT_INST_GET(index),       \
				  &ra_spi_data_##index, &ra_spi_config_##index, POST_KERNEL,       \
				  CONFIG_SPI_INIT_PRIORITY, &ra_spi_driver_api);

/* Instantiate the driver for every enabled devicetree node. */
DT_INST_FOREACH_STATUS_OKAY(RA_SPI_INIT)
774