1 /*
2 * Copyright (c) 2024 Renesas Electronics Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #define DT_DRV_COMPAT renesas_ra_sdhc
8
9 #include <zephyr/kernel.h>
10 #include <zephyr/devicetree.h>
11 #include <zephyr/drivers/pinctrl.h>
12 #include <zephyr/drivers/gpio.h>
13 #include <zephyr/irq.h>
14 #include <soc.h>
15 #include <zephyr/logging/log.h>
16 #include <zephyr/drivers/sdhc.h>
17
18 /* Renesas include */
19 #include "sdhc_renesas_ra.h"
20 #include "r_sdhi.h"
21 #include "r_dtc.h"
22 #include "r_sdhi_private.h"
23
24 LOG_MODULE_REGISTER(sdhc_renesas_ra, CONFIG_SDHC_LOG_LEVEL);
25
26 /*
27 * The extern functions below are implemented in the r_sdhi.c source file.
28 * For more information, please refer to r_sdhi.c in HAL Renesas
29 */
30 extern fsp_err_t r_sdhi_transfer_write(sdhi_instance_ctrl_t *const p_ctrl, uint32_t block_count,
31 uint32_t bytes, const uint8_t *p_data);
32 extern fsp_err_t r_sdhi_transfer_read(sdhi_instance_ctrl_t *const p_ctrl, uint32_t block_count,
33 uint32_t bytes, void *p_data);
34 extern fsp_err_t r_sdhi_max_clock_rate_set(sdhi_instance_ctrl_t *p_ctrl, uint32_t max_rate);
35 extern fsp_err_t r_sdhi_hw_cfg(sdhi_instance_ctrl_t *const p_ctrl);
36 extern fsp_err_t r_sdhi_read_and_block(sdhi_instance_ctrl_t *const p_ctrl, uint32_t command,
37 uint32_t argument, uint32_t byte_count);
38 extern fsp_err_t r_sdhi_wait_for_device(sdhi_instance_ctrl_t *const p_ctrl);
39 extern fsp_err_t r_sdhi_wait_for_event(sdhi_instance_ctrl_t *const p_ctrl, uint32_t bit,
40 uint32_t timeout);
41 extern void r_sdhi_command_send_no_wait(sdhi_instance_ctrl_t *p_ctrl, uint32_t command,
42 uint32_t argument);
43 extern void r_sdhi_read_write_common(sdhi_instance_ctrl_t *const p_ctrl, uint32_t sector_count,
44 uint32_t sector_size, uint32_t command, uint32_t argument);
45
/* Static (devicetree-derived) per-instance configuration. */
struct sdhc_ra_config {
	const struct pinctrl_dev_config *pcfg; /* pinctrl state for the SDHI signals */
	void *const regs;                      /* base address of the SDHI register block */
};
50
/* Per-instance runtime state for the Renesas RA SDHI driver. */
struct sdhc_ra_priv {
	struct st_sdmmc_instance_ctrl sdmmc_ctrl; /* FSP SDHI control block */
	struct st_sdmmc_cfg fsp_config;           /* FSP SDHI configuration passed to R_SDHI_Open() */
	struct gpio_dt_spec sdhi_en;              /* optional enable (card power) GPIO; port==NULL if absent */
	struct sdmmc_ra_event sdmmc_event;        /* transfer-complete flag + semaphore signalled from ISR callback */
	uint8_t channel;                          /* SDHI channel number */
	bool app_cmd;                             /* true when the previous command was CMD55 (APP_CMD) */
	uint32_t bus_clock;                       /* currently configured bus clock, Hz */
	uint8_t bus_width;                        /* currently configured data bus width (1 or 4) */
	enum sdhc_timing_mode timing;             /* currently configured timing mode */
	enum sdhc_power power_mode;               /* current power mode */
	struct k_sem thread_lock;                 /* serializes access to the controller */
	uint8_t status;                           /* NOTE(review): not referenced in this file — confirm use elsewhere */
	struct sdhc_host_props props;             /* host capabilities reported via get_host_props() */
	/* Transfer DTC */
	struct st_transfer_instance transfer;     /* DTC transfer instance handed to FSP */
	struct st_dtc_instance_ctrl transfer_ctrl;  /* DTC control block */
	struct st_transfer_info transfer_info;      /* DTC transfer descriptor */
	struct st_transfer_cfg transfer_cfg;        /* DTC configuration */
	struct st_dtc_extended_cfg transfer_cfg_extend; /* DTC activation source (dma_req IRQ) */
};
72
73 void sdhimmc_accs_isr(void);
74 void sdhimmc_card_isr(void);
75 void sdhimmc_dma_req_isr(void);
76
/* Zephyr IRQ trampoline: forward the SDHI access interrupt to the FSP ISR. */
static void ra_sdmmc_accs_isr(const void *parameter)
{
	ARG_UNUSED(parameter);
	sdhimmc_accs_isr();
}
82
/* Zephyr IRQ trampoline: forward the SDHI card-detect interrupt to the FSP ISR. */
static void ra_sdmmc_card_isr(const void *parameter)
{
	ARG_UNUSED(parameter);
	sdhimmc_card_isr();
}
88
/* Zephyr IRQ trampoline: forward the SDHI DMA-request interrupt to the FSP ISR. */
static void ra_sdmmc_dma_req_isr(const void *parameter)
{
	ARG_UNUSED(parameter);
	sdhimmc_dma_req_isr();
}
94
sdhc_ra_get_card_present(const struct device * dev)95 static int sdhc_ra_get_card_present(const struct device *dev)
96 {
97 struct sdhc_ra_priv *priv = dev->data;
98 fsp_err_t fsp_err;
99 int ret;
100 sdmmc_status_t status;
101
102 /* SDMMC_CARD_DETECT_CD must be configured as true to check here */
103 fsp_err = R_SDHI_StatusGet(&priv->sdmmc_ctrl, &status);
104 ret = err_fsp2zep(fsp_err);
105 if (ret < 0) {
106 return ret;
107 }
108
109 return (status.card_inserted);
110 }
111
sdhc_ra_card_busy(const struct device * dev)112 static int sdhc_ra_card_busy(const struct device *dev)
113 {
114 struct sdhc_ra_priv *priv = dev->data;
115 fsp_err_t fsp_err;
116 int ret;
117 sdmmc_status_t status;
118
119 fsp_err = R_SDHI_StatusGet(&priv->sdmmc_ctrl, &status);
120 ret = err_fsp2zep(fsp_err);
121 if (ret < 0) {
122 return ret;
123 }
124
125 return (status.transfer_in_progress);
126 }
127
/*
 * Send a command and wait for its response.
 *
 * Waits for the device to be ready, issues @p command with @p argument, then
 * blocks until a response, an error, or @p timeout (ms) elapses.
 *
 * Fix: the original discarded the result of r_sdhi_wait_for_device(), so a
 * command could be issued to a device that never became ready. Propagate
 * that failure instead.
 *
 * Returns 0 (FSP_SUCCESS) on success, non-zero FSP error otherwise.
 */
static int sdhi_command_send_wait(sdhi_instance_ctrl_t *p_ctrl, uint32_t command, uint32_t argument,
				  uint32_t timeout)
{
	int err;

	/* Verify the device is not busy; abort early if it never becomes ready. */
	err = r_sdhi_wait_for_device(p_ctrl);
	if (err != 0) {
		return err;
	}

	/* Send the command. */
	r_sdhi_command_send_no_wait(p_ctrl, command, argument);

	/* Wait for end of response, error or timeout */
	return r_sdhi_wait_for_event(p_ctrl, SDHI_PRV_RESPONSE_BIT, timeout);
}
140
sdhc_ra_send_cmd(struct sdhc_ra_priv * priv,struct sdmmc_ra_command * ra_cmd,int retries)141 static int sdhc_ra_send_cmd(struct sdhc_ra_priv *priv, struct sdmmc_ra_command *ra_cmd, int retries)
142 {
143 int fsp_err = 0;
144
145 while (retries > 0) {
146 fsp_err = sdhi_command_send_wait(&priv->sdmmc_ctrl, ra_cmd->opcode, ra_cmd->arg,
147 ra_cmd->timeout_ms);
148 if (fsp_err != 0) {
149 retries--; /* error, retry */
150 } else {
151 break;
152 }
153 }
154 return err_fsp2zep(fsp_err);
155 }
156
157 /*
158 * Send CMD or CMD/DATA via SDHC
159 */
sdhc_ra_request(const struct device * dev,struct sdhc_command * cmd,struct sdhc_data * data)160 static int sdhc_ra_request(const struct device *dev, struct sdhc_command *cmd,
161 struct sdhc_data *data)
162 {
163 struct sdhc_ra_priv *priv = dev->data;
164 int retries = (int)(cmd->retries + 1); /* first try plus retries */
165 uint32_t timeout_cfg = 0;
166 fsp_err_t fsp_err = 0;
167 int ret = 0;
168 sdmmc_priv_csd_reg_t p_csd_reg;
169
170 struct sdmmc_ra_command ra_cmd = {
171 .opcode = cmd->opcode,
172 .arg = cmd->arg,
173 };
174
175 if (data) {
176 ra_cmd.data = (uint8_t *)data->data;
177 ra_cmd.sector_count = data->blocks;
178 ra_cmd.sector_size = data->block_size;
179 timeout_cfg = data->timeout_ms;
180 } else {
181 timeout_cfg = cmd->timeout_ms;
182 }
183
184 if (cmd->timeout_ms == SDHC_TIMEOUT_FOREVER) {
185 ra_cmd.timeout_ms = SDHI_TIME_OUT_MAX;
186 } else {
187 ra_cmd.timeout_ms = timeout_cfg;
188 }
189
190 /* Reset semaphore */
191 k_sem_reset(&priv->sdmmc_event.transfer_sem);
192 k_sem_take(&priv->thread_lock, K_FOREVER);
193 if (ret < 0) {
194 LOG_ERR("Can not take sem!");
195 goto end;
196 }
197
198 /*
199 * Handle opcode with RA specifics
200 */
201 switch (cmd->opcode) {
202 case SD_GO_IDLE_STATE:
203 case SD_ALL_SEND_CID:
204 case SD_SEND_RELATIVE_ADDR:
205 case SD_SELECT_CARD:
206 case SD_SEND_IF_COND:
207 case SD_SET_BLOCK_SIZE:
208 case SD_ERASE_BLOCK_START:
209 case SD_ERASE_BLOCK_END:
210 case SD_ERASE_BLOCK_OPERATION:
211 case SD_APP_CMD:
212 case SD_SEND_STATUS:
213 /* Send command with argument */
214 ret = sdhc_ra_send_cmd(priv, &ra_cmd, retries);
215 if (ret < 0) {
216 goto end;
217 }
218 break;
219 case SD_SEND_CSD:
220 /* Read card specific data register */
221 ret = sdhc_ra_send_cmd(priv, &ra_cmd, retries);
222 if (ret < 0) {
223 goto end;
224 }
225 /* SDResponseR2 are bits from 8-127, first 8 MSBs are reserved */
226 p_csd_reg.reg.sdrsp10 = priv->sdmmc_ctrl.p_reg->SD_RSP10;
227 p_csd_reg.reg.sdrsp32 = priv->sdmmc_ctrl.p_reg->SD_RSP32;
228 p_csd_reg.reg.sdrsp54 = priv->sdmmc_ctrl.p_reg->SD_RSP54;
229 p_csd_reg.reg.sdrsp76 = priv->sdmmc_ctrl.p_reg->SD_RSP76;
230
231 /* Get the CSD version. */
232 uint32_t csd_version = p_csd_reg.csd_v1_b.csd_structure;
233 uint32_t mult;
234
235 if ((SDHI_PRV_CSD_VERSION_1_0 == csd_version) ||
236 (SDMMC_CARD_TYPE_MMC == priv->sdmmc_ctrl.device.card_type)) {
237 mult = (1U << (p_csd_reg.csd_v1_b.c_size_mult + 2));
238 priv->sdmmc_ctrl.device.sector_count =
239 ((p_csd_reg.csd_v1_b.c_size + 1U) * mult);
240
241 /* Scale the sector count by the actual block size. */
242 uint32_t read_sector_size = 1U << p_csd_reg.csd_v1_b.read_bl_len;
243
244 priv->sdmmc_ctrl.device.sector_count =
245 priv->sdmmc_ctrl.device.sector_count *
246 (read_sector_size / SDHI_MAX_BLOCK_SIZE);
247
248 if (SDMMC_CARD_TYPE_MMC == priv->sdmmc_ctrl.device.card_type) {
249 /*
250 * If c_size is 0xFFF, then sector_count should be obtained from the
251 * extended CSD. Set it to 0 to indicate it should come from the
252 * extended CSD later.
253 */
254 if (SDHI_PRV_SECTOR_COUNT_IN_EXT_CSD == p_csd_reg.csd_v1_b.c_size) {
255 priv->sdmmc_ctrl.device.sector_count = 0U;
256 }
257 }
258 }
259
260 #if SDHI_CFG_SD_SUPPORT_ENABLE
261 else if (SDHI_PRV_CSD_VERSION_2_0 == csd_version) {
262 priv->sdmmc_ctrl.device.sector_count =
263 (p_csd_reg.csd_v2_b.c_size + 1U) * SDHI_PRV_BYTES_PER_KILOBYTE;
264 } else {
265 /* Do Nothing */
266 }
267
268 if (SDHI_PRV_CSD_VERSION_1_0 == csd_version) {
269 /* Get the minimum erasable unit (in 512 byte sectors). */
270 priv->sdmmc_ctrl.device.erase_sector_count =
271 p_csd_reg.csd_v1_b.sector_size + 1U;
272 } else
273 #endif
274 {
275 /*
276 * For SDHC and SDXC cards, there are no erase group restrictions.
277 * Using the eMMC TRIM operation, there are no erase group restrictions.
278 */
279 priv->sdmmc_ctrl.device.erase_sector_count = 1U;
280 }
281 break;
282 case SD_APP_SEND_OP_COND:
283 ra_cmd.opcode |= SDHI_PRV_CMD_C_ACMD;
284 ret = sdhc_ra_send_cmd(priv, &ra_cmd, retries);
285 if (ret < 0) {
286 goto end;
287 }
288 sdmmc_response_t response;
289 /* get response of ACMD41 (R3) */
290 response.status = priv->sdmmc_ctrl.p_reg->SD_RSP10;
291 /* Initialization complete? */
292 if (response.r3.power_up_status) {
293 /* High capacity card ? */
294 /* 0 = SDSC, 1 = SDHC or SDXC */
295 priv->sdmmc_ctrl.sector_addressing =
296 (response.r3.card_capacity_status > 0U);
297 priv->sdmmc_ctrl.device.card_type = SDMMC_CARD_TYPE_SD;
298 }
299 priv->sdmmc_ctrl.initialized = true;
300 break;
301 case SD_SWITCH:
302 /* Check app cmd */
303 if (priv->app_cmd && cmd->opcode == SD_APP_SET_BUS_WIDTH) {
304 /* ACMD41*/
305 ra_cmd.opcode |= SDHI_PRV_CMD_C_ACMD;
306 ret = sdhc_ra_send_cmd(priv, &ra_cmd, retries);
307 if (ret < 0) {
308 goto end;
309 }
310 } else {
311 /* SD SWITCH CMD6*/
312 fsp_err = r_sdhi_read_and_block(&priv->sdmmc_ctrl, ra_cmd.opcode,
313 ra_cmd.arg, ra_cmd.sector_size);
314 ret = err_fsp2zep(fsp_err);
315 if (ret < 0) {
316 goto end;
317 }
318 memcpy(ra_cmd.data, priv->sdmmc_ctrl.aligned_buff, 8);
319 priv->sdmmc_event.transfer_completed = false;
320 break;
321 }
322 break;
323
324 /* Read write with data */
325 case SD_APP_SEND_SCR:
326 ra_cmd.opcode = cmd->opcode | SDHI_PRV_CMD_C_ACMD;
327 fsp_err = r_sdhi_read_and_block(&priv->sdmmc_ctrl, ra_cmd.opcode, ra_cmd.arg,
328 ra_cmd.sector_size);
329
330 if (fsp_err != 0) {
331 ret = -ETIMEDOUT;
332 goto end;
333 }
334 memcpy(ra_cmd.data, priv->sdmmc_ctrl.aligned_buff, 8);
335 priv->sdmmc_event.transfer_completed = false;
336 break;
337 case SD_READ_SINGLE_BLOCK:
338 case SD_READ_MULTIPLE_BLOCK:
339 /* Configure the transfer interface for reading.*/
340 fsp_err = r_sdhi_transfer_read(&priv->sdmmc_ctrl, ra_cmd.sector_count,
341 ra_cmd.sector_size, ra_cmd.data);
342 ret = err_fsp2zep(fsp_err);
343 if (ret < 0) {
344 goto end;
345 }
346
347 r_sdhi_read_write_common(&priv->sdmmc_ctrl, ra_cmd.sector_count, ra_cmd.sector_size,
348 ra_cmd.opcode, ra_cmd.arg);
349
350 /* Verify card is back in transfer state after write */
351 ret = k_sem_take(&priv->sdmmc_event.transfer_sem, K_MSEC(ra_cmd.timeout_ms));
352 if (ret < 0) {
353 LOG_ERR("Can not take sem!");
354 goto end;
355 }
356
357 if (!priv->sdmmc_event.transfer_completed) {
358 ret = -EIO;
359 goto end;
360 }
361
362 priv->sdmmc_event.transfer_completed = false;
363 break;
364
365 case SD_WRITE_SINGLE_BLOCK:
366 case SD_WRITE_MULTIPLE_BLOCK:
367
368 fsp_err = r_sdhi_transfer_write(&priv->sdmmc_ctrl, ra_cmd.sector_count,
369 ra_cmd.sector_size, ra_cmd.data);
370 ret = err_fsp2zep(fsp_err);
371 if (ret < 0) {
372 goto end;
373 }
374 /* Send command with data for reading */
375 r_sdhi_read_write_common(&priv->sdmmc_ctrl, ra_cmd.sector_count, ra_cmd.sector_size,
376 ra_cmd.opcode, ra_cmd.arg);
377
378 /* Verify card is back in transfer state after write */
379 ret = k_sem_take(&priv->sdmmc_event.transfer_sem, K_MSEC(ra_cmd.timeout_ms));
380 if (ret < 0) {
381 LOG_ERR("Can not take sem!");
382 goto end;
383 }
384
385 if (!priv->sdmmc_event.transfer_completed) {
386 ret = -EIO;
387 goto end;
388 }
389
390 priv->sdmmc_event.transfer_completed = false;
391 break;
392
393 default:
394 LOG_INF("SDHC driver: command %u not supported", cmd->opcode);
395 ret = -ENOTSUP;
396 }
397
398 if (ra_cmd.opcode == SD_ALL_SEND_CID || ra_cmd.opcode == SD_SEND_CSD) {
399 /* SDResponseR2 are bits from 8-127, first 8 MSBs are reserved */
400 p_csd_reg.reg.sdrsp10 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP10 << 8;
401 p_csd_reg.reg.sdrsp32 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP32 << 8;
402 p_csd_reg.reg.sdrsp54 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP54 << 8;
403 p_csd_reg.reg.sdrsp76 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP76 << 8;
404
405 memcpy(cmd->response, &p_csd_reg.reg, sizeof(cmd->response));
406 } else {
407 /* Fill response buffer */
408 p_csd_reg.reg.sdrsp10 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP10;
409 p_csd_reg.reg.sdrsp32 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP32;
410 p_csd_reg.reg.sdrsp54 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP54;
411 p_csd_reg.reg.sdrsp76 = (uint32_t)priv->sdmmc_ctrl.p_reg->SD_RSP76;
412
413 memcpy(cmd->response, &p_csd_reg.reg, sizeof(cmd->response));
414 }
415 end:
416 if (cmd->opcode == SD_APP_CMD) {
417 priv->app_cmd = true;
418 } else {
419 priv->app_cmd = false;
420 }
421
422 k_sem_give(&priv->thread_lock);
423
424 return ret;
425 }
426
sdhc_ra_reset(const struct device * dev)427 static int sdhc_ra_reset(const struct device *dev)
428 {
429 struct sdhc_ra_priv *priv = dev->data;
430 const struct sdhc_ra_config *cfg = dev->config;
431
432 k_sem_take(&priv->thread_lock, K_USEC(50));
433
434 /* Reset SDHI. */
435 ((R_SDHI0_Type *)cfg->regs)->SOFT_RST = 0x0U;
436 ((R_SDHI0_Type *)cfg->regs)->SOFT_RST = 0x1U;
437
438 k_sem_give(&priv->thread_lock);
439
440 return 0;
441 }
442
443 /*
444 * Set SDHC io properties
445 */
sdhc_ra_set_io(const struct device * dev,struct sdhc_io * ios)446 static int sdhc_ra_set_io(const struct device *dev, struct sdhc_io *ios)
447 {
448 struct sdhc_ra_priv *priv = dev->data;
449 const struct sdhc_ra_config *cfg = dev->config;
450 struct st_sdmmc_instance_ctrl *p_ctrl = &priv->sdmmc_ctrl;
451 int fsp_err;
452 int ret = 0;
453
454 uint8_t bus_width;
455 uint32_t bus_width_reg;
456
457 if (ios->bus_width > 0) {
458 bus_width_reg = 0;
459 /* Set bus width, SD bus interface doesn't support 8BIT */
460 switch (ios->bus_width) {
461 case SDHC_BUS_WIDTH1BIT:
462 bus_width = 1;
463 bus_width_reg = 4;
464 break;
465 case SDHC_BUS_WIDTH4BIT:
466 bus_width = 4;
467 break;
468 default:
469 ret = -ENOTSUP;
470 goto end;
471 }
472
473 if (priv->bus_width != bus_width) {
474 /* Set the bus width in the SDHI peripheral. */
475 ((R_SDHI0_Type *)cfg->regs)->SD_OPTION =
476 SDHI_PRV_SD_OPTION_DEFAULT |
477 (bus_width_reg << SDHI_PRV_SD_OPTION_WIDTH8_BIT);
478 priv->bus_width = bus_width;
479 }
480 }
481
482 if (ios->clock) {
483 if (ios->clock > priv->props.f_max || ios->clock < priv->props.f_min) {
484 LOG_ERR("Proposed clock outside supported host range");
485 return -EINVAL;
486 }
487
488 if (priv->bus_clock != (uint32_t)ios->clock) {
489 fsp_err = r_sdhi_max_clock_rate_set(p_ctrl, ios->clock);
490 ret = err_fsp2zep(fsp_err);
491 if (ret < 0) {
492 goto end;
493 }
494 priv->bus_clock = ios->clock;
495 }
496 }
497
498 if (ios->timing > 0) {
499 /* Set I/O timing */
500 if (priv->timing != ios->timing) {
501 switch (ios->timing) {
502 case SDHC_TIMING_LEGACY:
503 case SDHC_TIMING_HS:
504 case SDHC_TIMING_SDR12:
505 case SDHC_TIMING_SDR25:
506 break;
507 default:
508 LOG_ERR("Timing mode not supported for this device");
509 ret = -ENOTSUP;
510 break;
511 }
512
513 priv->timing = ios->timing;
514 }
515 }
516 end:
517
518 return ret;
519 }
520
521 /*
522 * Get host properties
523 */
sdhc_ra_get_host_props(const struct device * dev,struct sdhc_host_props * props)524 static int sdhc_ra_get_host_props(const struct device *dev, struct sdhc_host_props *props)
525 {
526 struct sdhc_ra_priv *priv = dev->data;
527
528 memcpy(props, &priv->props, sizeof(struct sdhc_host_props));
529 return 0;
530 }
531
sdhc_ra_init(const struct device * dev)532 static int sdhc_ra_init(const struct device *dev)
533 {
534 const struct sdhc_ra_config *config = dev->config;
535 struct sdhc_ra_priv *priv = dev->data;
536 fsp_err_t fsp_err;
537 int timeout = SDHI_PRV_ACCESS_TIMEOUT_US;
538 int ret = 0;
539
540 priv->sdmmc_event.transfer_completed = false;
541 k_sem_init(&priv->sdmmc_event.transfer_sem, 1, 1);
542
543 /* Configure dt provided device signals when available */
544 ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
545 if (ret < 0) {
546 return ret;
547 }
548 if (priv->sdhi_en.port != NULL) {
549 int err = gpio_pin_configure_dt(&priv->sdhi_en, GPIO_OUTPUT_HIGH);
550
551 if (err) {
552 return err;
553 }
554 k_sleep(K_MSEC(50));
555 }
556
557 k_sem_init(&priv->thread_lock, 1, 1);
558 fsp_err = R_SDHI_Open(&priv->sdmmc_ctrl, &priv->fsp_config);
559 ret = err_fsp2zep(fsp_err);
560
561 if (ret < 0) {
562 LOG_INF("R_SDHI_Open error: %d", fsp_err);
563 return ret; /* I/O error*/
564 }
565
566 k_busy_wait(100);
567
568 k_sem_take(&priv->thread_lock, K_USEC(timeout));
569
570 fsp_err = r_sdhi_hw_cfg(&priv->sdmmc_ctrl);
571 ret = err_fsp2zep(fsp_err);
572 if (ret < 0) {
573 LOG_ERR("failed to init sdmmc media");
574 goto end;
575 }
576 priv->bus_width = SDMMC_BUS_WIDTH_1_BIT;
577 priv->timing = SDHC_TIMING_LEGACY;
578 priv->bus_clock = SDMMC_CLOCK_400KHZ;
579
580 end:
581 k_sem_give(&priv->thread_lock);
582 return ret;
583 }
584
/* SDHC driver API hooks registered with the Zephyr SDHC subsystem. */
static DEVICE_API(sdhc, sdhc_api) = {
	.reset = sdhc_ra_reset,
	.request = sdhc_ra_request,
	.set_io = sdhc_ra_set_io,
	.get_card_present = sdhc_ra_get_card_present,
	.card_busy = sdhc_ra_card_busy,
	.get_host_props = sdhc_ra_get_host_props,
};
593
/*
 * Map an SDHI channel number to its ELC event codes. Two-level expansion so
 * that `channel` is macro-expanded before token pasting.
 */
#define _ELC_EVENT_SDMMC_ACCS(channel)    ELC_EVENT_SDHIMMC##channel##_ACCS
#define _ELC_EVENT_SDMMC_CARD(channel)    ELC_EVENT_SDHIMMC##channel##_CARD
#define _ELC_EVENT_SDMMC_DMA_REQ(channel) ELC_EVENT_SDHIMMC##channel##_DMA_REQ

#define ELC_EVENT_SDMMC_ACCS(channel)    _ELC_EVENT_SDMMC_ACCS(channel)
#define ELC_EVENT_SDMMC_CARD(channel)    _ELC_EVENT_SDMMC_CARD(channel)
#define ELC_EVENT_SDMMC_DMA_REQ(channel) _ELC_EVENT_SDMMC_DMA_REQ(channel)
601
/*
 * Route the instance's ELC events into its ICU IELSR slots, connect the
 * three SDHI interrupt trampolines (access, card detect, DMA request) and
 * enable their IRQ lines.
 */
#define RA_SDMMC_IRQ_CONFIG_INIT(index)                                                            \
	do {                                                                                       \
		ARG_UNUSED(dev);                                                                   \
												   \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, accs, irq)] =                              \
			ELC_EVENT_SDMMC_ACCS(DT_INST_PROP(index, channel));                        \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, card, irq)] =                              \
			ELC_EVENT_SDMMC_CARD(DT_INST_PROP(index, channel));                        \
		R_ICU->IELSR[DT_INST_IRQ_BY_NAME(index, dma_req, irq)] =                           \
			ELC_EVENT_SDMMC_DMA_REQ(DT_INST_PROP(index, channel));                     \
												   \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, accs, irq),                                 \
			    DT_INST_IRQ_BY_NAME(index, accs, priority), ra_sdmmc_accs_isr,         \
			    DEVICE_DT_INST_GET(index), 0);                                         \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, card, irq),                                 \
			    DT_INST_IRQ_BY_NAME(index, card, priority), ra_sdmmc_card_isr,         \
			    DEVICE_DT_INST_GET(index), 0);                                         \
		IRQ_CONNECT(DT_INST_IRQ_BY_NAME(index, dma_req, irq),                              \
			    DT_INST_IRQ_BY_NAME(index, dma_req, priority), ra_sdmmc_dma_req_isr,   \
			    DEVICE_DT_INST_GET(index), 0);                                         \
												   \
		irq_enable(DT_INST_IRQ_BY_NAME(index, accs, irq));                                 \
		irq_enable(DT_INST_IRQ_BY_NAME(index, card, irq));                                 \
		irq_enable(DT_INST_IRQ_BY_NAME(index, dma_req, irq));                              \
	} while (0)
627
/* Resolve the optional enable (card power) GPIO from devicetree, if present. */
#define RA_SDHI_EN(index) .sdhi_en = GPIO_DT_SPEC_INST_GET_OR(index, enable_gpios, {0})

/* Late-bind the instance's DTC transfer pointer into its FSP configuration. */
#define RA_SDMMC_DTC_INIT(index)                                                                   \
	sdhc_ra_priv_##index.fsp_config.p_lower_lvl_transfer = &sdhc_ra_priv_##index.transfer;

/* Static initializers for the DTC transfer descriptor, config and instance. */
#define RA_SDMMC_DTC_STRUCT_INIT(index)                                                            \
	.transfer_info =                                                                           \
		{                                                                                  \
			.transfer_settings_word_b.dest_addr_mode = TRANSFER_ADDR_MODE_FIXED,      \
			.transfer_settings_word_b.repeat_area = TRANSFER_REPEAT_AREA_SOURCE,      \
			.transfer_settings_word_b.irq = TRANSFER_IRQ_END,                         \
			.transfer_settings_word_b.chain_mode = TRANSFER_CHAIN_MODE_DISABLED,      \
			.transfer_settings_word_b.src_addr_mode = TRANSFER_ADDR_MODE_INCREMENTED, \
			.transfer_settings_word_b.size = TRANSFER_SIZE_4_BYTE,                    \
			.transfer_settings_word_b.mode = TRANSFER_MODE_NORMAL,                    \
			.p_dest = (void *)NULL,                                                   \
			.p_src = (void const *)NULL,                                              \
			.num_blocks = 0,                                                          \
			.length = 128,                                                            \
		},                                                                                 \
	.transfer_cfg_extend = {.activation_source = DT_INST_IRQ_BY_NAME(index, dma_req, irq)},   \
	.transfer_cfg =                                                                            \
		{                                                                                  \
			.p_info = &sdhc_ra_priv_##index.transfer_info,                            \
			.p_extend = &sdhc_ra_priv_##index.transfer_cfg_extend,                    \
		},                                                                                 \
	.transfer = {                                                                              \
		.p_ctrl = &sdhc_ra_priv_##index.transfer_ctrl,                                    \
		.p_cfg = &sdhc_ra_priv_##index.transfer_cfg,                                      \
		.p_api = &g_transfer_on_dtc,                                                      \
	},
659
/*
 * Per-instance expansion: pinctrl, config struct, FSP completion callback,
 * runtime state (including host capability properties), per-instance init
 * wrapper and device definition. Instantiated below for every enabled
 * devicetree node.
 */
#define RA_SDHC_INIT(index)                                                                        \
												   \
	PINCTRL_DT_INST_DEFINE(index);                                                             \
												   \
	static const struct sdhc_ra_config sdhc_ra_config_##index = {                              \
		.pcfg = PINCTRL_DT_INST_DEV_CONFIG_GET(index),                                     \
		.regs = (R_SDHI0_Type *)DT_INST_REG_ADDR(index),                                   \
	};                                                                                         \
	/* FSP callback: signal the waiting thread on transfer complete/error. */                  \
	void r_sdhi_callback_##index(sdmmc_callback_args_t *p_args)                                \
	{                                                                                          \
		const struct device *dev = DEVICE_DT_INST_GET(index);                              \
		struct sdhc_ra_priv *priv = dev->data;                                             \
		if (p_args->event == SDMMC_EVENT_TRANSFER_COMPLETE) {                              \
			priv->sdmmc_event.transfer_completed = true;                               \
			k_sem_give(&priv->sdmmc_event.transfer_sem);                               \
		} else if (p_args->event == SDMMC_EVENT_TRANSFER_ERROR) {                          \
			priv->sdmmc_event.transfer_completed = false;                              \
			k_sem_give(&priv->sdmmc_event.transfer_sem);                               \
		}                                                                                  \
	}                                                                                          \
												   \
	static struct sdhc_ra_priv sdhc_ra_priv_##index = {                                        \
		.power_mode = SDHC_POWER_ON,                                                       \
		.timing = SDHC_TIMING_LEGACY,                                                      \
		.fsp_config =                                                                      \
			{                                                                          \
				.channel = DT_INST_PROP(index, channel),                           \
				.bus_width = DT_INST_PROP(index, bus_width),                       \
				.access_ipl = DT_INST_IRQ_BY_NAME(index, accs, priority),          \
				.access_irq = DT_INST_IRQ_BY_NAME(index, accs, irq),               \
				.card_ipl = DT_INST_IRQ_BY_NAME(index, card, priority),            \
				.card_irq = DT_INST_IRQ_BY_NAME(index, card, irq),                 \
				.dma_req_ipl = DT_INST_IRQ_BY_NAME(index, dma_req, priority),      \
				.dma_req_irq = DT_INST_IRQ_BY_NAME(index, dma_req, irq),           \
				.p_context = NULL,                                                 \
				.p_callback = r_sdhi_callback_##index,                             \
				.card_detect = DT_INST_PROP(index, card_detect),                   \
				.write_protect = DT_INST_PROP(index, write_protect),               \
				.p_extend = NULL,                                                  \
				.p_lower_lvl_transfer = &sdhc_ra_priv_##index.transfer,            \
			},                                                                         \
		.props = {.is_spi = false,                                                         \
			  .f_max = DT_INST_PROP(index, max_bus_freq),                              \
			  .f_min = DT_INST_PROP(index, min_bus_freq),                              \
			  .max_current_330 = DT_INST_PROP(index, max_current_330),                 \
			  .max_current_180 = DT_INST_PROP(index, max_current_180),                 \
			  .power_delay = DT_INST_PROP_OR(index, power_delay_ms, 0),                \
			  .host_caps = {.vol_180_support = false,                                  \
					.vol_300_support = false,                                  \
					.vol_330_support = true,                                   \
					.suspend_res_support = false,                              \
					.sdma_support = true,                                      \
					.high_spd_support = (DT_INST_PROP(index, bus_width) == 4)  \
								    ? true                         \
								    : false,                       \
					.adma_2_support = false,                                   \
					.max_blk_len = 0,                                          \
					.ddr50_support = false,                                    \
					.sdr104_support = false,                                   \
					.sdr50_support = false,                                    \
					.bus_8_bit_support = false,                                \
					.bus_4_bit_support = (DT_INST_PROP(index, bus_width) == 4) \
								     ? true                        \
								     : false,                      \
					.hs200_support = false,                                    \
					.hs400_support = false}},                                  \
		RA_SDHI_EN(index),                                                                 \
		RA_SDMMC_DTC_STRUCT_INIT(index)};                                                  \
												   \
	static int sdhc_ra_init##index(const struct device *dev)                                   \
	{                                                                                          \
		RA_SDMMC_DTC_INIT(index);                                                          \
		RA_SDMMC_IRQ_CONFIG_INIT(index);                                                   \
		int err = sdhc_ra_init(dev);                                                       \
		if (err != 0) {                                                                    \
			return err;                                                                \
		}                                                                                  \
		return 0;                                                                          \
	}                                                                                          \
												   \
	DEVICE_DT_INST_DEFINE(index, sdhc_ra_init##index, NULL, &sdhc_ra_priv_##index,             \
			      &sdhc_ra_config_##index, POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY,     \
			      &sdhc_api);

DT_INST_FOREACH_STATUS_OKAY(RA_SDHC_INIT)