1 /*
2 * Copyright (c) 2024 Renesas Electronics Corporation
3 * SPDX-License-Identifier: Apache-2.0
4 */
5
6 #define DT_DRV_COMPAT renesas_ra_spi
7
8 #include <zephyr/drivers/spi.h>
9 #include <zephyr/drivers/pinctrl.h>
10 #include <zephyr/irq.h>
11 #include <soc.h>
12 #include <instances/r_dtc.h>
13 #include <instances/r_spi.h>
14
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(ra_spi);
17
18 #include "spi_context.h"
19
20 #if defined(CONFIG_SPI_INTERRUPT)
21 void spi_rxi_isr(void);
22 void spi_txi_isr(void);
23 void spi_tei_isr(void);
24 void spi_eri_isr(void);
25 #endif
26
27 struct ra_spi_config {
28 const struct pinctrl_dev_config *pcfg;
29 };
30
31 struct ra_spi_data {
32 struct spi_context ctx;
33 uint8_t dfs;
34 struct st_spi_instance_ctrl spi;
35 struct st_spi_cfg fsp_config;
36 struct st_spi_extended_cfg fsp_config_extend;
37 #if CONFIG_SPI_INTERRUPT
38 uint32_t data_len;
39 #endif
40 #if defined(CONFIG_SPI_RA_DTC)
41 /* RX */
42 struct st_transfer_instance rx_transfer;
43 struct st_dtc_instance_ctrl rx_transfer_ctrl;
44 struct st_transfer_info rx_transfer_info;
45 struct st_transfer_cfg rx_transfer_cfg;
46 struct st_dtc_extended_cfg rx_transfer_cfg_extend;
47
48 /* TX */
49 struct st_transfer_instance tx_transfer;
50 struct st_dtc_instance_ctrl tx_transfer_ctrl;
51 struct st_transfer_info tx_transfer_info;
52 struct st_transfer_cfg tx_transfer_cfg;
53 struct st_dtc_extended_cfg tx_transfer_cfg_extend;
54 #endif
55 };
56
spi_cb(spi_callback_args_t * p_args)57 static void spi_cb(spi_callback_args_t *p_args)
58 {
59 struct device *dev = (struct device *)p_args->p_context;
60 struct ra_spi_data *data = dev->data;
61
62 switch (p_args->event) {
63 case SPI_EVENT_TRANSFER_COMPLETE:
64 spi_context_cs_control(&data->ctx, false);
65 spi_context_complete(&data->ctx, dev, 0);
66 break;
67 case SPI_EVENT_ERR_MODE_FAULT: /* Mode fault error */
68 case SPI_EVENT_ERR_READ_OVERFLOW: /* Read overflow error */
69 case SPI_EVENT_ERR_PARITY: /* Parity error */
70 case SPI_EVENT_ERR_OVERRUN: /* Overrun error */
71 case SPI_EVENT_ERR_FRAMING: /* Framing error */
72 case SPI_EVENT_ERR_MODE_UNDERRUN: /* Underrun error */
73 spi_context_cs_control(&data->ctx, false);
74 spi_context_complete(&data->ctx, dev, -EIO);
75 break;
76 default:
77 break;
78 }
79 }
80
ra_spi_configure(const struct device * dev,const struct spi_config * config)81 static int ra_spi_configure(const struct device *dev, const struct spi_config *config)
82 {
83 struct ra_spi_data *data = dev->data;
84 fsp_err_t fsp_err;
85
86 if (spi_context_configured(&data->ctx, config)) {
87 /* Nothing to do */
88 return 0;
89 }
90
91 if (data->spi.open != 0) {
92 R_SPI_Close(&data->spi);
93 }
94
95 if ((config->operation & SPI_FRAME_FORMAT_TI) == SPI_FRAME_FORMAT_TI) {
96 return -ENOTSUP;
97 }
98
99 if (config->operation & SPI_OP_MODE_SLAVE) {
100 data->fsp_config.operating_mode = SPI_MODE_SLAVE;
101 } else {
102 data->fsp_config.operating_mode = SPI_MODE_MASTER;
103 }
104
105 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPOL) {
106 data->fsp_config.clk_polarity = SPI_CLK_POLARITY_HIGH;
107 } else {
108 data->fsp_config.clk_polarity = SPI_CLK_POLARITY_LOW;
109 }
110
111 if (SPI_MODE_GET(config->operation) & SPI_MODE_CPHA) {
112 data->fsp_config.clk_phase = SPI_CLK_PHASE_EDGE_EVEN;
113 } else {
114 if (data->fsp_config.operating_mode == SPI_MODE_MASTER) {
115 data->fsp_config.clk_phase = SPI_CLK_PHASE_EDGE_ODD;
116 } else {
117 LOG_ERR("Invalid clock phase");
118 return -EINVAL;
119 }
120 }
121
122 if (config->operation & SPI_TRANSFER_LSB) {
123 data->fsp_config.bit_order = SPI_BIT_ORDER_LSB_FIRST;
124 } else {
125 data->fsp_config.bit_order = SPI_BIT_ORDER_MSB_FIRST;
126 }
127
128 if (config->operation & SPI_CS_ACTIVE_HIGH) {
129 data->fsp_config_extend.ssl_polarity = SPI_SSLP_HIGH;
130 } else {
131 data->fsp_config_extend.ssl_polarity = SPI_SSLP_LOW;
132 }
133
134 if (!(config->operation & SPI_OP_MODE_SLAVE)) {
135 LOG_INF("frequency: %d", config->frequency);
136 fsp_err = R_SPI_CalculateBitrate(config->frequency,
137 &data->fsp_config_extend.spck_div);
138 if (fsp_err != FSP_SUCCESS) {
139 LOG_ERR("spi frequency calculate error %d", fsp_err);
140 return -EIO;
141 }
142 }
143
144 data->fsp_config_extend.spi_comm = SPI_COMMUNICATION_FULL_DUPLEX;
145 if (spi_cs_is_gpio(config) || !IS_ENABLED(CONFIG_SPI_USE_HW_SS)) {
146 data->fsp_config_extend.spi_clksyn = SPI_SSL_MODE_CLK_SYN;
147 } else {
148 data->fsp_config_extend.spi_clksyn = SPI_SSL_MODE_SPI;
149 data->fsp_config_extend.ssl_select = SPI_SSL_SELECT_SSL0;
150 }
151
152 data->fsp_config.p_extend = &data->fsp_config_extend;
153
154 data->fsp_config.p_callback = spi_cb;
155 data->fsp_config.p_context = dev;
156 fsp_err = R_SPI_Open(&data->spi, &data->fsp_config);
157 if (fsp_err != FSP_SUCCESS) {
158 LOG_ERR("R_SPI_Open error: %d", fsp_err);
159 return -EIO;
160 }
161 data->ctx.config = config;
162
163 return 0;
164 }
165
ra_spi_transfer_ongoing(struct ra_spi_data * data)166 static bool ra_spi_transfer_ongoing(struct ra_spi_data *data)
167 {
168 #if defined(CONFIG_SPI_INTERRUPT)
169 return (spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx));
170 #else
171 if (spi_context_total_tx_len(&data->ctx) < spi_context_total_rx_len(&data->ctx)) {
172 return (spi_context_tx_on(&data->ctx) || spi_context_rx_on(&data->ctx));
173 } else {
174 return (spi_context_tx_on(&data->ctx) && spi_context_rx_on(&data->ctx));
175 }
176 #endif
177 }
178
179 #ifndef CONFIG_SPI_INTERRUPT
ra_spi_transceive_slave(struct ra_spi_data * data)180 static int ra_spi_transceive_slave(struct ra_spi_data *data)
181 {
182 R_SPI0_Type *p_spi_reg = data->spi.p_regs;
183 spi_bit_width_t spi_width =
184 (spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);
185
186 if (p_spi_reg->SPSR_b.SPTEF && spi_context_tx_buf_on(&data->ctx)) {
187 uint32_t tx;
188
189 if (data->ctx.tx_buf != NULL) {
190 if (data->dfs > 2) {
191 tx = *(uint32_t *)(data->ctx.tx_buf);
192 } else if (data->dfs > 1) {
193 tx = *(uint16_t *)(data->ctx.tx_buf);
194 } else {
195 tx = *(uint8_t *)(data->ctx.tx_buf);
196 }
197 } else {
198 tx = 0;
199 }
200
201 /* Update data register */
202 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
203 p_spi_reg->SPDR = (uint32_t)tx;
204 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
205 p_spi_reg->SPDR_BY = (uint8_t)tx;
206 } else {
207 p_spi_reg->SPDR_HA = (uint16_t)tx;
208 }
209
210 spi_context_update_tx(&data->ctx, data->dfs, 1);
211 } else {
212 p_spi_reg->SPCR_b.SPTIE = 0;
213 }
214
215 if (p_spi_reg->SPSR_b.SPRF && spi_context_rx_buf_on(&data->ctx)) {
216 uint32_t rx;
217
218 /* Update RX data */
219 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
220 rx = (uint32_t)p_spi_reg->SPDR;
221 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
222 rx = (uint8_t)p_spi_reg->SPDR_BY;
223 } else {
224 rx = (uint16_t)p_spi_reg->SPDR_HA;
225 }
226
227 if (data->dfs > 2) {
228 UNALIGNED_PUT(rx, (uint32_t *)data->ctx.rx_buf);
229 } else if (data->dfs > 1) {
230 UNALIGNED_PUT(rx, (uint16_t *)data->ctx.rx_buf);
231 } else {
232 UNALIGNED_PUT(rx, (uint8_t *)data->ctx.rx_buf);
233 }
234 spi_context_update_rx(&data->ctx, data->dfs, 1);
235 }
236
237 return 0;
238 }
239
ra_spi_transceive_master(struct ra_spi_data * data)240 static int ra_spi_transceive_master(struct ra_spi_data *data)
241 {
242 R_SPI0_Type *p_spi_reg = data->spi.p_regs;
243 spi_bit_width_t spi_width =
244 (spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);
245 uint32_t tx;
246 uint32_t rx;
247
248 /* Tx transfer */
249 if (spi_context_tx_buf_on(&data->ctx)) {
250 if (data->dfs > 2) {
251 tx = *(uint32_t *)(data->ctx.tx_buf);
252 } else if (data->dfs > 1) {
253 tx = *(uint16_t *)(data->ctx.tx_buf);
254 } else {
255 tx = *(uint8_t *)(data->ctx.tx_buf);
256 }
257 } else {
258 tx = 0U;
259 }
260
261 while (!p_spi_reg->SPSR_b.SPTEF) {
262 }
263
264 /* Update data register */
265 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
266 p_spi_reg->SPDR = (uint32_t)tx;
267 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
268 p_spi_reg->SPDR_BY = (uint8_t)tx;
269 } else {
270 p_spi_reg->SPDR_HA = (uint16_t)tx;
271 }
272
273 spi_context_update_tx(&data->ctx, data->dfs, 1);
274
275 if (p_spi_reg->SPCR_b.TXMD == 0x0) {
276 while (!p_spi_reg->SPSR_b.SPRF) {
277 }
278
279 /* Update RX data */
280 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
281 rx = (uint32_t)p_spi_reg->SPDR;
282 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
283 rx = (uint8_t)p_spi_reg->SPDR_BY;
284 } else {
285 rx = (uint16_t)p_spi_reg->SPDR_HA;
286 }
287
288 if (spi_context_rx_buf_on(&data->ctx)) {
289 if (data->dfs > 2) {
290 UNALIGNED_PUT(rx, (uint32_t *)data->ctx.rx_buf);
291 } else if (data->dfs > 1) {
292 UNALIGNED_PUT(rx, (uint16_t *)data->ctx.rx_buf);
293 } else {
294 UNALIGNED_PUT(rx, (uint8_t *)data->ctx.rx_buf);
295 }
296 }
297 spi_context_update_rx(&data->ctx, data->dfs, 1);
298 }
299
300 return 0;
301 }
302
ra_spi_transceive_data(struct ra_spi_data * data)303 static int ra_spi_transceive_data(struct ra_spi_data *data)
304 {
305
306 uint16_t operation = data->ctx.config->operation;
307
308 if (SPI_OP_MODE_GET(operation) == SPI_OP_MODE_MASTER) {
309 ra_spi_transceive_master(data);
310 } else {
311 ra_spi_transceive_slave(data);
312 }
313
314 return 0;
315 }
316 #endif
317
transceive(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs,bool asynchronous,spi_callback_t cb,void * userdata)318 static int transceive(const struct device *dev, const struct spi_config *config,
319 const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs,
320 bool asynchronous, spi_callback_t cb, void *userdata)
321 {
322 struct ra_spi_data *data = dev->data;
323 R_SPI0_Type *p_spi_reg;
324 int ret = 0;
325
326 if (!tx_bufs && !rx_bufs) {
327 return 0;
328 }
329
330 #ifndef CONFIG_SPI_INTERRUPT
331 if (asynchronous) {
332 return -ENOTSUP;
333 }
334 #endif
335
336 spi_context_lock(&data->ctx, asynchronous, cb, userdata, config);
337
338 ret = ra_spi_configure(dev, config);
339 if (ret) {
340 goto end;
341 }
342 data->dfs = ((SPI_WORD_SIZE_GET(config->operation) - 1) / 8) + 1;
343 p_spi_reg = data->spi.p_regs;
344 spi_bit_width_t spi_width =
345 (spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);
346 /* Set buffers info */
347 spi_context_buffers_setup(&data->ctx, tx_bufs, rx_bufs, data->dfs);
348
349 spi_context_cs_control(&data->ctx, true);
350
351 #ifdef CONFIG_SPI_INTERRUPT
352 if (data->ctx.rx_len == 0) {
353 data->data_len = spi_context_is_slave(&data->ctx)
354 ? spi_context_total_tx_len(&data->ctx)
355 : data->ctx.tx_len;
356 } else if (data->ctx.tx_len == 0) {
357 data->data_len = spi_context_is_slave(&data->ctx)
358 ? spi_context_total_rx_len(&data->ctx)
359 : data->ctx.rx_len;
360 } else {
361 data->data_len = spi_context_is_slave(&data->ctx)
362 ? MAX(spi_context_total_tx_len(&data->ctx),
363 spi_context_total_rx_len(&data->ctx))
364 : MIN(data->ctx.tx_len, data->ctx.rx_len);
365 }
366
367 if (data->ctx.rx_buf == NULL) {
368 R_SPI_Write(&data->spi, data->ctx.tx_buf, data->data_len, spi_width);
369 } else if (data->ctx.tx_buf == NULL) {
370 R_SPI_Read(&data->spi, data->ctx.rx_buf, data->data_len, spi_width);
371 } else {
372 R_SPI_WriteRead(&data->spi, data->ctx.tx_buf, data->ctx.rx_buf, data->data_len,
373 spi_width);
374 }
375
376 ret = spi_context_wait_for_completion(&data->ctx);
377
378 #else
379 p_spi_reg->SPCR_b.TXMD = (0x0);
380 if (!spi_context_tx_on(&data->ctx)) {
381 p_spi_reg->SPCR_b.TXMD = 0x0;
382 }
383 if (!spi_context_rx_on(&data->ctx)) {
384 p_spi_reg->SPCR_b.TXMD = 0x1; /* tx only */
385 }
386
387 uint32_t spdcr = p_spi_reg->SPDCR;
388
389 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
390 /* Configure Word access to data register. */
391 spdcr &= ~R_SPI0_SPDCR_SPBYT_Msk;
392 spdcr |= R_SPI0_SPDCR_SPLW_Msk;
393 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
394 /* Set SPBYT so 8bit transfer works with the DTC/DMAC. */
395 spdcr |= R_SPI0_SPDCR_SPBYT_Msk;
396 } else {
397 /* Configure Half-Word access to data register. */
398 spdcr &= ~(R_SPI0_SPDCR_SPBYT_Msk | R_SPI0_SPDCR_SPLW_Msk);
399 }
400
401 /* Configure data length based on the selected bit width . */
402 uint32_t bit_width = spi_width;
403
404 if (bit_width > SPI_BIT_WIDTH_16_BITS) {
405 bit_width = ((bit_width + 1) >> 2) - 5;
406 }
407 p_spi_reg->SPDCR = (uint8_t)spdcr;
408 p_spi_reg->SPCMD[0] |= bit_width << 8;
409
410 /* Enable the SPI Transfer. */
411 p_spi_reg->SPCR |= R_SPI0_SPCR_SPE_Msk;
412
413 do {
414 ra_spi_transceive_data(data);
415 } while (ra_spi_transfer_ongoing(data));
416
417 /* Wait for transmision complete */
418 while (p_spi_reg->SPSR_b.IDLNF) {
419 }
420 /* Disable the SPI Transfer. */
421 p_spi_reg->SPCR_b.SPE = 0;
422
423 spi_context_cs_control(&data->ctx, false);
424 spi_context_complete(&data->ctx, dev, 0);
425 #endif
426 #ifdef CONFIG_SPI_SLAVE
427 if (spi_context_is_slave(&data->ctx) && !ret) {
428 ret = data->ctx.recv_frames;
429 }
430 #endif /* CONFIG_SPI_SLAVE */
431
432 end:
433 spi_context_release(&data->ctx, ret);
434
435 return ret;
436 }
437
ra_spi_transceive(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs)438 static int ra_spi_transceive(const struct device *dev, const struct spi_config *config,
439 const struct spi_buf_set *tx_bufs, const struct spi_buf_set *rx_bufs)
440 {
441 return transceive(dev, config, tx_bufs, rx_bufs, false, NULL, NULL);
442 }
443
444 #ifdef CONFIG_SPI_ASYNC
ra_spi_transceive_async(const struct device * dev,const struct spi_config * config,const struct spi_buf_set * tx_bufs,const struct spi_buf_set * rx_bufs,spi_callback_t cb,void * userdata)445 static int ra_spi_transceive_async(const struct device *dev, const struct spi_config *config,
446 const struct spi_buf_set *tx_bufs,
447 const struct spi_buf_set *rx_bufs, spi_callback_t cb,
448 void *userdata)
449 {
450 return transceive(dev, config, tx_bufs, rx_bufs, true, cb, userdata);
451 }
452 #endif /* CONFIG_SPI_ASYNC */
453
ra_spi_release(const struct device * dev,const struct spi_config * config)454 static int ra_spi_release(const struct device *dev, const struct spi_config *config)
455 {
456 struct ra_spi_data *data = dev->data;
457
458 spi_context_unlock_unconditionally(&data->ctx);
459
460 return 0;
461 }
462
463 static DEVICE_API(spi, ra_spi_driver_api) = {.transceive = ra_spi_transceive,
464 #ifdef CONFIG_SPI_ASYNC
465 .transceive_async = ra_spi_transceive_async,
466 #endif /* CONFIG_SPI_ASYNC */
467 .release = ra_spi_release};
468
spi_ra_init(const struct device * dev)469 static int spi_ra_init(const struct device *dev)
470 {
471 const struct ra_spi_config *config = dev->config;
472 struct ra_spi_data *data = dev->data;
473 int ret;
474
475 /* Configure dt provided device signals when available */
476 ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
477 if (ret < 0) {
478 return ret;
479 }
480
481 ret = spi_context_cs_configure_all(&data->ctx);
482 if (ret < 0) {
483 return ret;
484 }
485
486 spi_context_unlock_unconditionally(&data->ctx);
487
488 return 0;
489 }
490
491 #if defined(CONFIG_SPI_INTERRUPT)
492
ra_spi_retransmit(struct ra_spi_data * data)493 static void ra_spi_retransmit(struct ra_spi_data *data)
494 {
495 spi_bit_width_t spi_width =
496 (spi_bit_width_t)(SPI_WORD_SIZE_GET(data->ctx.config->operation) - 1);
497
498 if (data->ctx.rx_len == 0) {
499 data->data_len = data->ctx.tx_len;
500 data->spi.p_tx_data = data->ctx.tx_buf;
501 data->spi.p_rx_data = NULL;
502 } else if (data->ctx.tx_len == 0) {
503 data->data_len = data->ctx.rx_len;
504 data->spi.p_tx_data = NULL;
505 data->spi.p_rx_data = data->ctx.rx_buf;
506 } else {
507 data->data_len = MIN(data->ctx.tx_len, data->ctx.rx_len);
508 data->spi.p_tx_data = data->ctx.tx_buf;
509 data->spi.p_rx_data = data->ctx.rx_buf;
510 }
511
512 data->spi.bit_width = spi_width;
513 data->spi.rx_count = 0;
514 data->spi.tx_count = 0;
515 data->spi.count = data->data_len;
516
517 #ifdef CONFIG_SPI_RA_DTC
518 /* Determine DTC transfer size */
519 transfer_size_t size;
520
521 if (SPI_BIT_WIDTH_16_BITS < spi_width) {
522 size = TRANSFER_SIZE_4_BYTE;
523 } else if (SPI_BIT_WIDTH_8_BITS >= spi_width) {
524 size = TRANSFER_SIZE_1_BYTE;
525 } else {
526 size = TRANSFER_SIZE_2_BYTE;
527 }
528
529 if (data->spi.p_cfg->p_transfer_rx) {
530 /* When the rxi interrupt is called, all transfers will be finished. */
531 data->spi.rx_count = data->data_len;
532
533 transfer_instance_t *p_transfer_rx =
534 (transfer_instance_t *)data->spi.p_cfg->p_transfer_rx;
535 transfer_info_t *p_info = p_transfer_rx->p_cfg->p_info;
536
537 /* Configure the receive DMA instance. */
538 p_info->transfer_settings_word_b.size = size;
539 p_info->length = (uint16_t)data->data_len;
540 p_info->transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED;
541 p_info->p_dest = data->ctx.rx_buf;
542
543 if (NULL == data->ctx.rx_buf) {
544 static uint32_t dummy_rx;
545
546 p_info->transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED;
547 p_info->p_dest = &dummy_rx;
548 }
549
550 p_transfer_rx->p_api->reconfigure(p_transfer_rx->p_ctrl, p_info);
551 }
552
553 if (data->spi.p_cfg->p_transfer_tx) {
554 /* When the txi interrupt is called, all transfers will be finished. */
555 data->spi.tx_count = data->data_len;
556
557 transfer_instance_t *p_transfer_tx =
558 (transfer_instance_t *)data->spi.p_cfg->p_transfer_tx;
559 transfer_info_t *p_info = p_transfer_tx->p_cfg->p_info;
560
561 /* Configure the transmit DMA instance. */
562 p_info->transfer_settings_word_b.size = size;
563 p_info->length = (uint16_t)data->data_len;
564 p_info->transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED;
565 p_info->p_src = data->ctx.tx_buf;
566
567 if (NULL == data->ctx.tx_buf) {
568 static uint32_t dummy_tx;
569
570 p_info->transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED;
571 p_info->p_src = &dummy_tx;
572 }
573 R_SPI0_Type *p_spi_reg = data->spi.p_regs;
574
575 p_transfer_tx->p_api->reconfigure(p_transfer_tx->p_ctrl, p_info);
576
577 /* Enable the SPI Transfer */
578 p_spi_reg->SPCR |= R_SPI0_SPCR_SPE_Msk;
579 }
580
581 #endif
582 }
583
ra_spi_rxi_isr(const struct device * dev)584 static void ra_spi_rxi_isr(const struct device *dev)
585 {
586 #ifndef CONFIG_SPI_SLAVE
587 ARG_UNUSED(dev);
588 spi_rxi_isr();
589 #else
590 struct ra_spi_data *data = dev->data;
591
592 spi_rxi_isr();
593
594 if (spi_context_is_slave(&data->ctx) && data->spi.rx_count == data->spi.count) {
595 if (data->ctx.rx_buf != NULL && data->ctx.tx_buf != NULL) {
596 data->ctx.recv_frames = MIN(spi_context_total_tx_len(&data->ctx),
597 spi_context_total_rx_len(&data->ctx));
598 } else if (data->ctx.tx_buf == NULL) {
599 data->ctx.recv_frames = data->data_len;
600 }
601 R_BSP_IrqDisable(data->fsp_config.tei_irq);
602
603 /* Writing 0 to SPE generatates a TXI IRQ. Disable the TXI IRQ.
604 * (See Section 38.2.1 SPI Control Register in the RA6T2 manual R01UH0886EJ0100).
605 */
606 R_BSP_IrqDisable(data->fsp_config.txi_irq);
607
608 /* Disable the SPI Transfer. */
609 data->spi.p_regs->SPCR_b.SPE = 0;
610
611 /* Re-enable the TXI IRQ and clear the pending IRQ. */
612 R_BSP_IrqEnable(data->fsp_config.txi_irq);
613
614 spi_context_cs_control(&data->ctx, false);
615 spi_context_complete(&data->ctx, dev, 0);
616 }
617
618 #endif
619 }
620
ra_spi_txi_isr(const struct device * dev)621 static void ra_spi_txi_isr(const struct device *dev)
622 {
623 ARG_UNUSED(dev);
624 spi_txi_isr();
625 }
626
ra_spi_tei_isr(const struct device * dev)627 static void ra_spi_tei_isr(const struct device *dev)
628 {
629 struct ra_spi_data *data = dev->data;
630 R_SPI0_Type *p_spi_reg = data->spi.p_regs;
631
632 if (data->spi.rx_count == data->spi.count) {
633 spi_context_update_rx(&data->ctx, 1, data->data_len);
634 }
635 if (data->spi.tx_count == data->spi.count) {
636 spi_context_update_tx(&data->ctx, 1, data->data_len);
637 }
638
639 if (ra_spi_transfer_ongoing(data)) {
640 R_BSP_IrqDisable(data->spi.p_cfg->txi_irq);
641 /* Disable the SPI Transfer. */
642 p_spi_reg->SPCR_b.SPE = 0U;
643 /* Clear the status register. */
644 p_spi_reg->SPSR;
645 p_spi_reg->SPSR = 0;
646 R_BSP_IrqEnable(data->spi.p_cfg->txi_irq);
647
648 #ifndef CONFIG_SPI_RA_DTC
649 /* Enable the SPI Transfer */
650 p_spi_reg->SPCR |= R_SPI0_SPCR_SPE_Msk;
651 #endif
652
653 R_ICU->IELSR_b[data->fsp_config.tei_irq].IR = 0U;
654 ra_spi_retransmit(data);
655 } else {
656 spi_tei_isr();
657 }
658 }
659
ra_spi_eri_isr(const struct device * dev)660 static void ra_spi_eri_isr(const struct device *dev)
661 {
662 ARG_UNUSED(dev);
663 spi_eri_isr();
664 }
665 #endif
666
667 #define _ELC_EVENT_SPI_RXI(channel) ELC_EVENT_SPI##channel##_RXI
668 #define _ELC_EVENT_SPI_TXI(channel) ELC_EVENT_SPI##channel##_TXI
669 #define _ELC_EVENT_SPI_TEI(channel) ELC_EVENT_SPI##channel##_TEI
670 #define _ELC_EVENT_SPI_ERI(channel) ELC_EVENT_SPI##channel##_ERI
671
672 #define ELC_EVENT_SPI_RXI(channel) _ELC_EVENT_SPI_RXI(channel)
673 #define ELC_EVENT_SPI_TXI(channel) _ELC_EVENT_SPI_TXI(channel)
674 #define ELC_EVENT_SPI_TEI(channel) _ELC_EVENT_SPI_TEI(channel)
675 #define ELC_EVENT_SPI_ERI(channel) _ELC_EVENT_SPI_ERI(channel)
676
677 #if defined(CONFIG_SPI_INTERRUPT)
678
679 #define RA_SPI_IRQ_CONFIG_INIT(index) \
680 do { \
681 ARG_UNUSED(dev); \
682 \
683 R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, rxi, irq)] = \
684 ELC_EVENT_SPI_RXI(DT_INST_PROP(index, channel)); \
685 R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, txi, irq)] = \
686 ELC_EVENT_SPI_TXI(DT_INST_PROP(index, channel)); \
687 R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, tei, irq)] = \
688 ELC_EVENT_SPI_TEI(DT_INST_PROP(index, channel)); \
689 R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, eri, irq)] = \
690 ELC_EVENT_SPI_ERI(DT_INST_PROP(index, channel)); \
691 \
692 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, rxi, irq), \
693 DT_INST_IRQ_BY_NAME(index, rxi, priority), ra_spi_rxi_isr, \
694 DEVICE_DT_INST_GET(index), 0); \
695 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, txi, irq), \
696 DT_INST_IRQ_BY_NAME(index, txi, priority), ra_spi_txi_isr, \
697 DEVICE_DT_INST_GET(index), 0); \
698 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, tei, irq), \
699 DT_INST_IRQ_BY_NAME(index, tei, priority), ra_spi_tei_isr, \
700 DEVICE_DT_INST_GET(index), 0); \
701 IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, eri, irq), \
702 DT_INST_IRQ_BY_NAME(index, eri, priority), ra_spi_eri_isr, \
703 DEVICE_DT_INST_GET(index), 0); \
704 \
705 irq_enable(DT_INST_IRQ_BY_NAME(index, rxi, irq)); \
706 irq_enable(DT_INST_IRQ_BY_NAME(index, txi, irq)); \
707 irq_enable(DT_INST_IRQ_BY_NAME(index, eri, irq)); \
708 } while (0)
709
710 #else
711
712 #define RA_SPI_IRQ_CONFIG_INIT(index)
713
714 #endif
715
716 #ifndef CONFIG_SPI_RA_DTC
717 #define RA_SPI_DTC_STRUCT_INIT(index)
718 #define RA_SPI_DTC_INIT(index)
719 #else
720 #define RA_SPI_DTC_INIT(index) \
721 do { \
722 if (DT_INST_PROP_OR(index, rx_dtc, false)) { \
723 ra_spi_data_##index.fsp_config.p_transfer_rx = \
724 &ra_spi_data_##index.rx_transfer; \
725 } \
726 if (DT_INST_PROP_OR(index, tx_dtc, false)) { \
727 ra_spi_data_##index.fsp_config.p_transfer_tx = \
728 &ra_spi_data_##index.tx_transfer; \
729 } \
730 } while (0)
731 #define RA_SPI_DTC_STRUCT_INIT(index) \
732 .rx_transfer_info = \
733 { \
734 .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \
735 .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_DESTINATION, \
736 .transfer_settings_word_b.irq = TRANSFER_IRQ_END, \
737 .transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED, \
738 .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_FIXED, \
739 .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \
740 .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \
741 .p_dest = (void *)NULL, \
742 .p_src = (void const *)NULL, \
743 .num_blocks = 0, \
744 .length = 0, \
745 }, \
746 .rx_transfer_cfg_extend = {.activation_source = DT_INST_IRQ_BY_NAME(index, rxi, irq)}, \
747 .rx_transfer_cfg = \
748 { \
749 .p_info = &ra_spi_data_##index.rx_transfer_info, \
750 .p_extend = &ra_spi_data_##index.rx_transfer_cfg_extend, \
751 }, \
752 .rx_transfer = \
753 { \
754 .p_ctrl = &ra_spi_data_##index.rx_transfer_ctrl, \
755 .p_cfg = &ra_spi_data_##index.rx_transfer_cfg, \
756 .p_api = &g_transfer_on_dtc, \
757 }, \
758 .tx_transfer_info = \
759 { \
760 .transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED, \
761 .transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_SOURCE, \
762 .transfer_settings_word_b.irq = TRANSFER_IRQ_END, \
763 .transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED, \
764 .transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \
765 .transfer_settings_word_b.size = TRANSFER_SIZE_1_BYTE, \
766 .transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL, \
767 .p_dest = (void *)NULL, \
768 .p_src = (void const *)NULL, \
769 .num_blocks = 0, \
770 .length = 0, \
771 }, \
772 .tx_transfer_cfg_extend = {.activation_source = DT_INST_IRQ_BY_NAME(index, txi, irq)}, \
773 .tx_transfer_cfg = \
774 { \
775 .p_info = &ra_spi_data_##index.tx_transfer_info, \
776 .p_extend = &ra_spi_data_##index.tx_transfer_cfg_extend, \
777 }, \
778 .tx_transfer = { \
779 .p_ctrl = &ra_spi_data_##index.tx_transfer_ctrl, \
780 .p_cfg = &ra_spi_data_##index.tx_transfer_cfg, \
781 .p_api = &g_transfer_on_dtc, \
782 },
783 #endif
784
785 #define RA_SPI_INIT(index) \
786 \
787 PINCTRL_DT_INST_DEFINE(index); \
788 \
789 static const struct ra_spi_config ra_spi_config_##index = { \
790 .pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index), \
791 }; \
792 \
793 static struct ra_spi_data ra_spi_data_##index = { \
794 SPI_CONTEXT_CS_GPIOS_INITIALIZE(DT_DRV_INST(index), ctx) \
795 SPI_CONTEXT_INIT_LOCK(ra_spi_data_##index, ctx), \
796 SPI_CONTEXT_INIT_SYNC(ra_spi_data_##index, ctx), \
797 .fsp_config = \
798 { \
799 .channel = DT_INST_PROP(index, channel), \
800 .rxi_ipl = DT_INST_IRQ_BY_NAME(index, rxi, priority), \
801 .rxi_irq = DT_INST_IRQ_BY_NAME(index, rxi, irq), \
802 .txi_ipl = DT_INST_IRQ_BY_NAME(index, txi, priority), \
803 .txi_irq = DT_INST_IRQ_BY_NAME(index, txi, irq), \
804 .tei_ipl = DT_INST_IRQ_BY_NAME(index, tei, priority), \
805 .tei_irq = DT_INST_IRQ_BY_NAME(index, tei, irq), \
806 .eri_ipl = DT_INST_IRQ_BY_NAME(index, eri, priority), \
807 .eri_irq = DT_INST_IRQ_BY_NAME(index, eri, irq), \
808 }, \
809 RA_SPI_DTC_STRUCT_INIT(index)}; \
810 \
811 static int spi_ra_init##index(const struct device *dev) \
812 { \
813 RA_SPI_DTC_INIT(index); \
814 int err = spi_ra_init(dev); \
815 if (err != 0) { \
816 return err; \
817 } \
818 RA_SPI_IRQ_CONFIG_INIT(index); \
819 return 0; \
820 } \
821 \
822 DEVICE_DT_INST_DEFINE(index, spi_ra_init##index, PM_DEVICE_DT_INST_GET(index), \
823 &ra_spi_data_##index, &ra_spi_config_##index, PRE_KERNEL_1, \
824 CONFIG_SPI_INIT_PRIORITY, &ra_spi_driver_api);
825
826 DT_INST_FOREACH_STATUS_OKAY(RA_SPI_INIT)
827