/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT intel_emmc_host

#include <zephyr/kernel.h>
#include <zephyr/devicetree.h>
#include <zephyr/drivers/sdhc.h>
#include <zephyr/sd/sd_spec.h>
#include <zephyr/cache.h>
#include "intel_emmc_host.h"
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
BUILD_ASSERT(IS_ENABLED(CONFIG_PCIE), "DT needs CONFIG_PCIE");
#include <zephyr/drivers/pcie/pcie.h>
#endif

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(emmc_hc, CONFIG_SDHC_LOG_LEVEL);

typedef void (*emmc_isr_cb_t)(const struct device *dev);

#ifdef CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE
#define ADMA_DESC_SIZE CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE
#else
#define ADMA_DESC_SIZE 0
#endif
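
/*
 * Each desc_table entry below is a single 64-bit ADMA2 descriptor, so
 * ADMA_DESC_SIZE also bounds how many blocks one DMA transfer can describe.
 * When ADMA support is disabled the table degenerates to zero length.
 */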

struct emmc_config {
#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
	struct pcie_dev *pcie;
#else
	DEVICE_MMIO_ROM;
#endif
	emmc_isr_cb_t config_func;
	uint32_t max_bus_freq;
	uint32_t min_bus_freq;
	uint32_t power_delay_ms;
	uint8_t hs200_mode: 1;
	uint8_t hs400_mode: 1;
	uint8_t dw_4bit: 1;
	uint8_t dw_8bit: 1;
};

struct emmc_data {
	DEVICE_MMIO_RAM;
	uint32_t rca;
	struct sdhc_io host_io;
	struct k_sem lock;
	struct k_event irq_event;
	uint64_t desc_table[ADMA_DESC_SIZE];
	struct sdhc_host_props props;
	bool card_present;
};
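
/*
 * The ISR forwards hardware status into irq_event: normal interrupt status
 * bits are posted as-is, while error interrupt status bits are posted
 * through ERR_INTR_STATUS_EVENT(), keeping the two groups distinct within
 * one 32-bit event word so waiters can tell success from failure.
 */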

static void enable_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->normal_int_stat_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_stat_en = EMMC_HOST_ERROR_INTR_MASK;
	regs->normal_int_signal_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_signal_en = EMMC_HOST_ERROR_INTR_MASK;
	regs->timeout_ctrl = EMMC_HOST_MAX_TIMEOUT;
}

static void disable_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	/* Keep the interrupt status registers enabled so status keeps updating */
	regs->normal_int_stat_en = EMMC_HOST_NORMAL_INTR_MASK;
	regs->err_int_stat_en = EMMC_HOST_ERROR_INTR_MASK;

	/* Disable only interrupt signal generation */
	regs->normal_int_signal_en &= 0;
	regs->err_int_signal_en &= 0;
	regs->timeout_ctrl = EMMC_HOST_MAX_TIMEOUT;
}

static void clear_interrupts(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->normal_int_stat = EMMC_HOST_NORMAL_INTR_MASK_CLR;
	regs->err_int_stat = EMMC_HOST_ERROR_INTR_MASK;
}

static int emmc_set_voltage(const struct device *dev, enum sd_voltage signal_voltage)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	bool power_state = regs->power_ctrl & EMMC_HOST_POWER_CTRL_SD_BUS_POWER ? true : false;
	int ret = 0;

	if (power_state) {
		/* Turn OFF Bus Power before configuring the voltage */
		regs->power_ctrl &= ~EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	switch (signal_voltage) {
	case SD_VOL_3_3_V:
		if (regs->capabilities & EMMC_HOST_VOL_3_3_V_SUPPORT) {
			regs->host_ctrl2 &=
				~(EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC);

			/* 3.3v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_3_3_V_SELECT;
			LOG_DBG("3.3V Selected for MMC Card");
		} else {
			LOG_ERR("3.3V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	case SD_VOL_3_0_V:
		if (regs->capabilities & EMMC_HOST_VOL_3_0_V_SUPPORT) {
			regs->host_ctrl2 &=
				~(EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC);

			/* 3.0v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_3_0_V_SELECT;
			LOG_DBG("3.0V Selected for MMC Card");
		} else {
			LOG_ERR("3.0V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	case SD_VOL_1_8_V:
		if (regs->capabilities & EMMC_HOST_VOL_1_8_V_SUPPORT) {
			regs->host_ctrl2 |= EMMC_HOST_CTRL2_1P8V_SIG_EN
					    << EMMC_HOST_CTRL2_1P8V_SIG_LOC;

			/* 1.8v voltage select */
			regs->power_ctrl = EMMC_HOST_VOL_1_8_V_SELECT;
			LOG_DBG("1.8V Selected for MMC Card");
		} else {
			LOG_ERR("1.8V not supported by MMC Host");
			ret = -ENOTSUP;
		}
		break;

	default:
		ret = -EINVAL;
	}

	if (power_state) {
		/* Turn ON Bus Power */
		regs->power_ctrl |= EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	return ret;
}

static int emmc_set_power(const struct device *dev, enum sdhc_power state)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (state == SDHC_POWER_ON) {
		/* Turn ON Bus Power */
		regs->power_ctrl |= EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	} else {
		/* Turn OFF Bus Power */
		regs->power_ctrl &= ~EMMC_HOST_POWER_CTRL_SD_BUS_POWER;
	}

	k_msleep(10u);

	return 0;
}

static bool emmc_disable_clock(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (regs->present_state & EMMC_HOST_PSTATE_CMD_INHIBIT) {
		LOG_ERR("present_state:%x", regs->present_state);
		return false;
	}
	if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) {
		LOG_ERR("present_state:%x", regs->present_state);
		return false;
	}

	regs->clock_ctrl &= ~EMMC_HOST_INTERNAL_CLOCK_EN;
	regs->clock_ctrl &= ~EMMC_HOST_SD_CLOCK_EN;

	while ((regs->clock_ctrl & EMMC_HOST_SD_CLOCK_EN) != 0) {
		;
	}

	return true;
}

static bool emmc_enable_clock(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	regs->clock_ctrl |= EMMC_HOST_INTERNAL_CLOCK_EN;
	/* Wait for the internal clock to stabilize */
	while ((regs->clock_ctrl & EMMC_HOST_INTERNAL_CLOCK_STABLE) == 0) {
		;
	}

	/* Enable SD Clock */
	regs->clock_ctrl |= EMMC_HOST_SD_CLOCK_EN;
	while ((regs->clock_ctrl & EMMC_HOST_SD_CLOCK_EN) == 0) {
		;
	}

	return true;
}
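/*
 * Program the SDHC divided clock. Per the code below, the base clock is
 * read from capabilities[15:8] and a 10-bit divisor N is chosen so that
 * SDCLK = base / (2 * N); this assumes the EMMC_HOST_CLK_FREQ_* constants
 * are, like the base clock field, expressed in MHz.
 */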
static bool emmc_clock_set(const struct device *dev, enum sdhc_clock_speed speed)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint8_t base_freq;
	uint32_t clock_divider;
	float freq;
	bool ret;

	switch (speed) {
	case SDMMC_CLOCK_400KHZ:
		freq = EMMC_HOST_CLK_FREQ_400K;
		break;

	case SD_CLOCK_25MHZ:
	case MMC_CLOCK_26MHZ:
		freq = EMMC_HOST_CLK_FREQ_25M;
		break;

	case SD_CLOCK_50MHZ:
	case MMC_CLOCK_52MHZ:
		freq = EMMC_HOST_CLK_FREQ_50M;
		break;

	case SD_CLOCK_100MHZ:
		freq = EMMC_HOST_CLK_FREQ_100M;
		break;

	case MMC_CLOCK_HS200:
		freq = EMMC_HOST_CLK_FREQ_200M;
		break;

	case SD_CLOCK_208MHZ:
	default:
		return false;
	}

	ret = emmc_disable_clock(dev);
	if (!ret) {
		return false;
	}

	base_freq = regs->capabilities >> 8;
	clock_divider = (int)(base_freq / (freq * 2));

	LOG_DBG("Clock divider for MMC clock %d Hz is %d", speed, clock_divider);

	SET_BITS(regs->clock_ctrl, EMMC_HOST_CLK_SDCLCK_FREQ_SEL_LOC,
		 EMMC_HOST_CLK_SDCLCK_FREQ_SEL_MASK, clock_divider);
	SET_BITS(regs->clock_ctrl, EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_LOC,
		 EMMC_HOST_CLK_SDCLCK_FREQ_SEL_UPPER_MASK, clock_divider >> 8);

	emmc_enable_clock(dev);

	return true;
}

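/*
 * Map the requested SDHC timing onto the controller's UHS mode select
 * field. HS200 and HS400 share the HS400 encoding here, and 1.8V
 * signaling is enabled unconditionally before the mode is written.
 */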
static int set_timing(const struct device *dev, enum sdhc_timing_mode timing)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = 0;
	uint8_t mode;

	LOG_DBG("UHS Mode: %d", timing);

	switch (timing) {
	case SDHC_TIMING_LEGACY:
	case SDHC_TIMING_HS:
	case SDHC_TIMING_SDR12:
		mode = EMMC_HOST_UHSMODE_SDR12;
		break;

	case SDHC_TIMING_SDR25:
		mode = EMMC_HOST_UHSMODE_SDR25;
		break;

	case SDHC_TIMING_SDR50:
		mode = EMMC_HOST_UHSMODE_SDR50;
		break;

	case SDHC_TIMING_SDR104:
		mode = EMMC_HOST_UHSMODE_SDR104;
		break;

	case SDHC_TIMING_DDR50:
	case SDHC_TIMING_DDR52:
		mode = EMMC_HOST_UHSMODE_DDR50;
		break;

	case SDHC_TIMING_HS400:
	case SDHC_TIMING_HS200:
		mode = EMMC_HOST_UHSMODE_HS400;
		break;

	default:
		ret = -ENOTSUP;
	}

	if (!ret) {
		if (!emmc_disable_clock(dev)) {
			LOG_ERR("Disable clk failed");
			return -EIO;
		}
		regs->host_ctrl2 |= EMMC_HOST_CTRL2_1P8V_SIG_EN << EMMC_HOST_CTRL2_1P8V_SIG_LOC;
		SET_BITS(regs->host_ctrl2, EMMC_HOST_CTRL2_UHS_MODE_SEL_LOC,
			 EMMC_HOST_CTRL2_UHS_MODE_SEL_MASK, mode);

		emmc_enable_clock(dev);
	}

	return ret;
}

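/*
 * Interrupt-driven command completion: the ISR posts
 * EMMC_HOST_CMD_COMPLETE on success and ERR_INTR_STATUS_EVENT(bits) on
 * error, so waiting on both distinguishes success, failure and timeout.
 */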
static int wait_for_cmd_complete(struct emmc_data *emmc, uint32_t time_out)
{
	int ret;
	k_timeout_t wait_time;
	uint32_t events;

	if (time_out == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(time_out);
	}

	events = k_event_wait(&emmc->irq_event,
			      EMMC_HOST_CMD_COMPLETE | ERR_INTR_STATUS_EVENT(EMMC_HOST_ERR_STATUS),
			      false, wait_time);

	if (events & EMMC_HOST_CMD_COMPLETE) {
		ret = 0;
	} else if (events & ERR_INTR_STATUS_EVENT(EMMC_HOST_ERR_STATUS)) {
		LOG_ERR("wait for cmd complete error: %x", events);
		ret = -EIO;
	} else {
		LOG_ERR("wait for cmd complete timeout");
		ret = -EAGAIN;
	}

	return ret;
}

static int poll_cmd_complete(const struct device *dev, uint32_t time_out)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = -EAGAIN;
	int32_t retry = time_out;

	while (retry > 0) {
		if (regs->normal_int_stat & EMMC_HOST_CMD_COMPLETE) {
			regs->normal_int_stat = EMMC_HOST_CMD_COMPLETE;
			ret = 0;
			break;
		}

		k_busy_wait(1000u);
		retry--;
	}

	if (regs->err_int_stat) {
		LOG_ERR("err_int_stat:%x", regs->err_int_stat);
		/* Write the set bits back to clear them (write-1-to-clear) */
		regs->err_int_stat &= regs->err_int_stat;
		ret = -EIO;
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		if (regs->adma_err_stat) {
			LOG_ERR("adma error: %x", regs->adma_err_stat);
			ret = -EIO;
		}
	}
	return ret;
}

void emmc_host_sw_reset(const struct device *dev, enum emmc_sw_reset reset)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (reset == EMMC_HOST_SW_RESET_DATA_LINE) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_DATA;
	} else if (reset == EMMC_HOST_SW_RESET_CMD_LINE) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_CMD;
	} else if (reset == EMMC_HOST_SW_RESET_ALL) {
		regs->sw_reset = EMMC_HOST_SW_RESET_REG_ALL;
	}

	while (regs->sw_reset != 0) {
		;
	}

	k_sleep(K_MSEC(100u));
}

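/*
 * Build the DMA programming for one transfer. For ADMA2, each 64-bit
 * descriptor carries a buffer address, a length and attribute flags
 * (Valid on every entry; End and Interrupt on the final one), one
 * descriptor per block. For SDMA, only the system address register
 * needs to point at the buffer.
 */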
static int emmc_dma_init(const struct device *dev, struct sdhc_data *data, bool read)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (IS_ENABLED(CONFIG_DCACHE) && !read) {
		sys_cache_data_flush_range(data->data, (data->blocks * data->block_size));
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		uint8_t *buff = data->data;

		/* Set up DMA transfer using ADMA2 */
		memset(emmc->desc_table, 0, sizeof(emmc->desc_table));

#if defined(CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE)
		__ASSERT_NO_MSG(data->blocks < CONFIG_INTEL_EMMC_HOST_ADMA_DESC_SIZE);
#endif
		for (int i = 0; i < data->blocks; i++) {
			emmc->desc_table[i] = ((uint64_t)buff) << EMMC_HOST_ADMA_BUFF_ADD_LOC;
			emmc->desc_table[i] |= data->block_size << EMMC_HOST_ADMA_BUFF_LEN_LOC;

			if (i == (data->blocks - 1u)) {
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LINK_LAST;
				emmc->desc_table[i] |= EMMC_HOST_ADMA_INTR_EN;
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LAST;
			} else {
				emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_LINK_NEXT;
			}
			emmc->desc_table[i] |= EMMC_HOST_ADMA_BUFF_VALID;
			buff += data->block_size;
			LOG_DBG("desc_table:%llx", emmc->desc_table[i]);
		}

		regs->adma_sys_addr1 = (uint32_t)((uintptr_t)emmc->desc_table & ADDRESS_32BIT_MASK);
		regs->adma_sys_addr2 =
			(uint32_t)(((uintptr_t)emmc->desc_table >> 32) & ADDRESS_32BIT_MASK);

		LOG_DBG("adma: %llx %x %p", emmc->desc_table[0], regs->adma_sys_addr1,
			emmc->desc_table);
	} else {
		/* Set up DMA transfer using SDMA */
		regs->sdma_sysaddr = (uint32_t)((uintptr_t)data->data);
		LOG_DBG("sdma_sysaddr: %x", regs->sdma_sysaddr);
	}
	return 0;
}

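/*
 * Program the transfer-mode, block-size and block-count registers for the
 * upcoming data command: DMA mode select (ADMA2 vs SDMA), block size and
 * SDMA boundary, auto CMD12/CMD23 policy, multi-block select, data
 * direction, block-gap interrupt and the data timeout.
 */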
static int emmc_init_xfr(const struct device *dev, struct sdhc_data *data, bool read)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint16_t multi_block = 0u;

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		emmc_dma_init(dev, data, read);
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA)) {
		SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DMA_SEL_LOC,
			 EMMC_HOST_CTRL1_DMA_SEL_MASK, 2u);
	} else {
		SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DMA_SEL_LOC,
			 EMMC_HOST_CTRL1_DMA_SEL_MASK, 0u);
	}

	/* Set Block Size Register */
	SET_BITS(regs->block_size, EMMC_HOST_DMA_BUF_SIZE_LOC, EMMC_HOST_DMA_BUF_SIZE_MASK,
		 EMMC_HOST_SDMA_BOUNDARY);
	SET_BITS(regs->block_size, EMMC_HOST_BLOCK_SIZE_LOC, EMMC_HOST_BLOCK_SIZE_MASK,
		 data->block_size);
	if (data->blocks > 1) {
		multi_block = 1u;
	}
	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) {
		if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_ADMA) &&
		    emmc->host_io.timing == SDHC_TIMING_SDR104) {
			/* Auto CMD23 is only applicable for ADMA */
			SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
				 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, multi_block ? 2 : 0);
		} else {
			SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
				 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, multi_block ? 1 : 0);
		}
	} else {
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_AUTO_CMD_EN_LOC,
			 EMMC_HOST_XFER_AUTO_CMD_EN_MASK, 0);
	}

	if (!IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) {
		/* Set block count register to 0 for infinite transfer mode */
		regs->block_count = 0;
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_BLOCK_CNT_EN_LOC,
			 EMMC_HOST_XFER_BLOCK_CNT_EN_MASK, 0);
	} else {
		regs->block_count = (uint16_t)data->blocks;
		/* Enable block count in the transfer mode register */
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_BLOCK_CNT_EN_LOC,
			 EMMC_HOST_XFER_BLOCK_CNT_EN_MASK, multi_block ? 1 : 0);
	}

	SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_MULTI_BLOCK_SEL_LOC,
		 EMMC_HOST_XFER_MULTI_BLOCK_SEL_MASK, multi_block);

	/* Set data transfer direction: Read = 1, Write = 0 */
	SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DATA_DIR_LOC, EMMC_HOST_XFER_DATA_DIR_MASK,
		 read ? 1u : 0u);

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		/* Enable DMA */
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DMA_EN_LOC, EMMC_HOST_XFER_DMA_EN_MASK,
			 1u);
	} else {
		SET_BITS(regs->transfer_mode, EMMC_HOST_XFER_DMA_EN_LOC, EMMC_HOST_XFER_DMA_EN_MASK,
			 0u);
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_BLOCK_GAP)) {
		/* Set an interrupt at the block gap */
		SET_BITS(regs->block_gap_ctrl, EMMC_HOST_BLOCK_GAP_LOC, EMMC_HOST_BLOCK_GAP_MASK,
			 1u);
	} else {
		SET_BITS(regs->block_gap_ctrl, EMMC_HOST_BLOCK_GAP_LOC, EMMC_HOST_BLOCK_GAP_MASK,
			 0u);
	}

	/* Set data timeout time */
	regs->timeout_ctrl = data->timeout_ms;

	return 0;
}

static int wait_xfr_intr_complete(const struct device *dev, uint32_t time_out)
{
	struct emmc_data *emmc = dev->data;
	uint32_t events;
	int ret;
	k_timeout_t wait_time;

	LOG_DBG("");

	if (time_out == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(time_out);
	}

	events = k_event_wait(&emmc->irq_event,
			      EMMC_HOST_XFER_COMPLETE |
				      ERR_INTR_STATUS_EVENT(EMMC_HOST_DMA_TXFR_ERR),
			      false, wait_time);

	if (events & EMMC_HOST_XFER_COMPLETE) {
		ret = 0;
	} else if (events & ERR_INTR_STATUS_EVENT(0xFFFF)) {
		LOG_ERR("wait for xfer complete error: %x", events);
		ret = -EIO;
	} else {
		LOG_ERR("wait for xfer complete timeout");
		ret = -EAGAIN;
	}

	return ret;
}

static int wait_xfr_poll_complete(const struct device *dev, uint32_t time_out)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	int ret = -EAGAIN;
	int32_t retry = time_out;

	LOG_DBG("");

	while (retry > 0) {
		if (regs->normal_int_stat & EMMC_HOST_XFER_COMPLETE) {
			regs->normal_int_stat |= EMMC_HOST_XFER_COMPLETE;
			ret = 0;
			break;
		}

		k_busy_wait(EMMC_HOST_MSEC_DELAY);
		retry--;
	}

	return ret;
}

static int wait_xfr_complete(const struct device *dev, uint32_t time_out)
{
	int ret;

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		ret = wait_xfr_intr_complete(dev, time_out);
	} else {
		ret = wait_xfr_poll_complete(dev, time_out);
	}
	return ret;
}

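/*
 * Translate the SD response type into the SDHC response-length encoding:
 * R2 uses the 136-bit format, R1b the 48-bit format with busy, and the
 * remaining supported types the plain 48-bit format.
 */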
static enum emmc_response_type emmc_decode_resp_type(enum sd_rsp_type type)
{
	enum emmc_response_type resp_type;

	switch (type & 0xF) {
	case SD_RSP_TYPE_NONE:
		resp_type = EMMC_HOST_RESP_NONE;
		break;
	case SD_RSP_TYPE_R1:
	case SD_RSP_TYPE_R3:
	case SD_RSP_TYPE_R4:
	case SD_RSP_TYPE_R5:
		resp_type = EMMC_HOST_RESP_LEN_48;
		break;
	case SD_RSP_TYPE_R1b:
		resp_type = EMMC_HOST_RESP_LEN_48B;
		break;
	case SD_RSP_TYPE_R2:
		resp_type = EMMC_HOST_RESP_LEN_136;
		break;

	case SD_RSP_TYPE_R5b:
	case SD_RSP_TYPE_R6:
	case SD_RSP_TYPE_R7:
	default:
		resp_type = EMMC_HOST_INVAL_HOST_RESP_LEN;
	}

	return resp_type;
}

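/*
 * Copy the command response out of the controller. A 136-bit R2 response
 * is reassembled from the halfword response registers and stored in
 * reverse register order; every other response type fits in resp_01.
 */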
static void update_cmd_response(const struct device *dev, struct sdhc_command *sdhc_cmd)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint32_t resp0, resp1, resp2, resp3;

	if (sdhc_cmd->response_type == SD_RSP_TYPE_NONE) {
		return;
	}

	resp0 = regs->resp_01;

	if (sdhc_cmd->response_type == SD_RSP_TYPE_R2) {
		resp1 = regs->resp_2 | (regs->resp_3 << 16u);
		resp2 = regs->resp_4 | (regs->resp_5 << 16u);
		resp3 = regs->resp_6 | (regs->resp_7 << 16u);

		LOG_DBG("cmd resp: %x %x %x %x", resp0, resp1, resp2, resp3);

		sdhc_cmd->response[0u] = resp3;
		sdhc_cmd->response[1U] = resp2;
		sdhc_cmd->response[2U] = resp1;
		sdhc_cmd->response[3U] = resp0;
	} else {
		LOG_DBG("cmd resp: %x", resp0);
		sdhc_cmd->response[0u] = resp0;
	}
}

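/*
 * Issue one command: check the CMD/DAT inhibit bits, write the argument,
 * then pack index, type, data-present and response-check flags into a
 * single 16-bit command register write, which starts the command.
 */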
static int emmc_host_send_cmd(const struct device *dev, const struct emmc_cmd_config *config)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	struct emmc_data *emmc = dev->data;
	struct sdhc_command *sdhc_cmd = config->sdhc_cmd;
	enum emmc_response_type resp_type = emmc_decode_resp_type(sdhc_cmd->response_type);
	uint16_t cmd_reg;
	int ret;

	LOG_DBG("");

	/* Check if CMD line is available */
	if (regs->present_state & EMMC_HOST_PSTATE_CMD_INHIBIT) {
		LOG_ERR("CMD line is not available");
		return -EBUSY;
	}

	if (config->data_present && (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT)) {
		LOG_ERR("Data line is not available");
		return -EBUSY;
	}

	if (resp_type == EMMC_HOST_INVAL_HOST_RESP_LEN) {
		LOG_ERR("Invalid eMMC resp type:%d", resp_type);
		return -EINVAL;
	}

	k_event_clear(&emmc->irq_event, EMMC_HOST_CMD_COMPLETE);

	regs->argument = sdhc_cmd->arg;

	cmd_reg = config->cmd_idx << EMMC_HOST_CMD_INDEX_LOC |
		  config->cmd_type << EMMC_HOST_CMD_TYPE_LOC |
		  config->data_present << EMMC_HOST_CMD_DATA_PRESENT_LOC |
		  config->idx_check_en << EMMC_HOST_CMD_IDX_CHECK_EN_LOC |
		  config->crc_check_en << EMMC_HOST_CMD_CRC_CHECK_EN_LOC |
		  resp_type << EMMC_HOST_CMD_RESP_TYPE_LOC;
	regs->cmd = cmd_reg;

	LOG_DBG("CMD REG:%x %x", cmd_reg, regs->cmd);
	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		ret = wait_for_cmd_complete(emmc, sdhc_cmd->timeout_ms);
	} else {
		ret = poll_cmd_complete(dev, sdhc_cmd->timeout_ms);
	}
	if (ret) {
		LOG_ERR("Error on send cmd: %d, status:%d", config->cmd_idx, ret);
		return ret;
	}

	update_cmd_response(dev, sdhc_cmd);

	return 0;
}

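/*
 * Terminate an open-ended transfer with CMD12 (SD_STOP_TRANSMISSION);
 * the card's RCA, shifted by EMMC_HOST_RCA_SHIFT, forms the argument.
 */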
static int emmc_stop_transfer(const struct device *dev)
{
	struct emmc_data *emmc = dev->data;
	struct sdhc_command hdc_cmd = {0};
	struct emmc_cmd_config cmd;

	hdc_cmd.arg = emmc->rca << EMMC_HOST_RCA_SHIFT;
	hdc_cmd.response_type = SD_RSP_TYPE_R1;
	hdc_cmd.timeout_ms = 1000;

	cmd.sdhc_cmd = &hdc_cmd;
	cmd.cmd_idx = SD_STOP_TRANSMISSION;
	cmd.cmd_type = EMMC_HOST_CMD_NORMAL;
	cmd.data_present = false;
	cmd.idx_check_en = false;
	cmd.crc_check_en = false;

	return emmc_host_send_cmd(dev, &cmd);
}

static int emmc_reset(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	LOG_DBG("");

	if (!(regs->present_state & EMMC_HOST_PSTATE_CARD_INSERTED)) {
		LOG_ERR("No EMMC card found");
		return -ENODEV;
	}

	/* Reset the host controller to its idle state */
	emmc_host_sw_reset(dev, EMMC_HOST_SW_RESET_ALL);

	clear_interrupts(dev);

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		enable_interrupts(dev);
	} else {
		disable_interrupts(dev);
	}

	return 0;
}

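/*
 * PIO read path: for each block, wait for Buffer Read Ready (event or
 * present-state poll), drain block_size bytes from the 32-bit data port,
 * then wait for Transfer Complete at the end.
 */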
static int read_data_port(const struct device *dev, struct sdhc_data *sdhc)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint32_t block_size = sdhc->block_size;
	uint32_t i, block_cnt = sdhc->blocks;
	uint32_t *data = (uint32_t *)sdhc->data;
	k_timeout_t wait_time;

	if (sdhc->timeout_ms == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(sdhc->timeout_ms);
	}

	LOG_DBG("");

	while (block_cnt--) {
		if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
			uint32_t events;

			events = k_event_wait(&emmc->irq_event, EMMC_HOST_BUF_RD_READY, false,
					      wait_time);
			k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_RD_READY);
			if (!(events & EMMC_HOST_BUF_RD_READY)) {
				LOG_ERR("time out on EMMC_HOST_BUF_RD_READY:%d",
					(sdhc->blocks - block_cnt));
				return -EIO;
			}
		} else {
			while ((regs->present_state & EMMC_HOST_PSTATE_BUF_READ_EN) == 0) {
				;
			}
		}

		if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) {
			for (i = block_size >> 2u; i != 0u; i--) {
				*data = regs->data_port;
				data++;
			}
		}
	}

	return wait_xfr_complete(dev, sdhc->timeout_ms);
}

static int write_data_port(const struct device *dev, struct sdhc_data *sdhc)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint32_t block_size = sdhc->block_size;
	uint32_t i, block_cnt = sdhc->blocks;
	uint32_t *data = (uint32_t *)sdhc->data;
	k_timeout_t wait_time;

	if (sdhc->timeout_ms == SDHC_TIMEOUT_FOREVER) {
		wait_time = K_FOREVER;
	} else {
		wait_time = K_MSEC(sdhc->timeout_ms);
	}

	LOG_DBG("");

	while ((regs->present_state & EMMC_HOST_PSTATE_BUF_WRITE_EN) == 0) {
		;
	}

	while (1) {
		uint32_t events;

		if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
			k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_WR_READY);
		}

		if (regs->present_state & EMMC_HOST_PSTATE_DAT_INHIBIT) {
			for (i = block_size >> 2u; i != 0u; i--) {
				regs->data_port = *data;
				data++;
			}
		}

		LOG_DBG("EMMC_HOST_BUF_WR_READY");

		if (!(--block_cnt)) {
			break;
		}
		if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
			events = k_event_wait(&emmc->irq_event, EMMC_HOST_BUF_WR_READY, false,
					      wait_time);
			k_event_clear(&emmc->irq_event, EMMC_HOST_BUF_WR_READY);

			if (!(events & EMMC_HOST_BUF_WR_READY)) {
				LOG_ERR("time out on EMMC_HOST_BUF_WR_READY");
				return -EIO;
			}
		} else {
			while ((regs->present_state & EMMC_HOST_PSTATE_BUF_WRITE_EN) == 0) {
				;
			}
		}
	}

	return wait_xfr_complete(dev, sdhc->timeout_ms);
}

static int emmc_send_cmd_no_data(const struct device *dev, uint32_t cmd_idx,
				 struct sdhc_command *cmd)
{
	struct emmc_cmd_config emmc_cmd;

	emmc_cmd.sdhc_cmd = cmd;
	emmc_cmd.cmd_idx = cmd_idx;
	emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL;
	emmc_cmd.data_present = false;
	emmc_cmd.idx_check_en = false;
	emmc_cmd.crc_check_en = false;

	return emmc_host_send_cmd(dev, &emmc_cmd);
}

static int emmc_send_cmd_data(const struct device *dev, uint32_t cmd_idx,
			      struct sdhc_command *cmd, struct sdhc_data *data, bool read)
{
	struct emmc_cmd_config emmc_cmd;
	int ret;

	emmc_cmd.sdhc_cmd = cmd;
	emmc_cmd.cmd_idx = cmd_idx;
	emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL;
	emmc_cmd.data_present = true;
	emmc_cmd.idx_check_en = true;
	emmc_cmd.crc_check_en = true;

	ret = emmc_init_xfr(dev, data, read);
	if (ret) {
		LOG_ERR("Error on init xfr");
		return ret;
	}

	ret = emmc_host_send_cmd(dev, &emmc_cmd);
	if (ret) {
		return ret;
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		ret = wait_xfr_complete(dev, data->timeout_ms);
	} else {
		if (read) {
			ret = read_data_port(dev, data);
		} else {
			ret = write_data_port(dev, data);
		}
	}

	return ret;
}

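/*
 * Block read/write entry point: set up the transfer, clear stale
 * completion events, issue the single- or multi-block command, then wait
 * for DMA completion or run the PIO data loop. When auto CMD12 is
 * disabled, the transfer is terminated manually with CMD12.
 */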
static int emmc_xfr(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data,
		    bool read)
{
	struct emmc_data *emmc = dev->data;
	int ret;
	struct emmc_cmd_config emmc_cmd;

	ret = emmc_init_xfr(dev, data, read);
	if (ret) {
		LOG_ERR("error emmc init xfr");
		return ret;
	}
	emmc_cmd.sdhc_cmd = cmd;
	emmc_cmd.cmd_type = EMMC_HOST_CMD_NORMAL;
	emmc_cmd.data_present = true;
	emmc_cmd.idx_check_en = true;
	emmc_cmd.crc_check_en = true;

	k_event_clear(&emmc->irq_event, EMMC_HOST_XFER_COMPLETE);
	k_event_clear(&emmc->irq_event, read ? EMMC_HOST_BUF_RD_READY : EMMC_HOST_BUF_WR_READY);

	if (data->blocks > 1) {
		emmc_cmd.cmd_idx = read ? SD_READ_MULTIPLE_BLOCK : SD_WRITE_MULTIPLE_BLOCK;
		ret = emmc_host_send_cmd(dev, &emmc_cmd);
	} else {
		emmc_cmd.cmd_idx = read ? SD_READ_SINGLE_BLOCK : SD_WRITE_SINGLE_BLOCK;
		ret = emmc_host_send_cmd(dev, &emmc_cmd);
	}

	if (ret) {
		return ret;
	}

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_DMA)) {
		ret = wait_xfr_complete(dev, data->timeout_ms);
	} else {
		if (read) {
			ret = read_data_port(dev, data);
		} else {
			ret = write_data_port(dev, data);
		}
	}

	if (!IS_ENABLED(CONFIG_INTEL_EMMC_HOST_AUTO_STOP)) {
		emmc_stop_transfer(dev);
	}
	return ret;
}

static int emmc_request(const struct device *dev, struct sdhc_command *cmd, struct sdhc_data *data)
{
	int ret;

	LOG_DBG("");

	if (data) {
		switch (cmd->opcode) {
		case SD_WRITE_SINGLE_BLOCK:
		case SD_WRITE_MULTIPLE_BLOCK:
			LOG_DBG("SD write block(s)");
			ret = emmc_xfr(dev, cmd, data, false);
			break;

		case SD_READ_SINGLE_BLOCK:
		case SD_READ_MULTIPLE_BLOCK:
			LOG_DBG("SD read block(s)");
			ret = emmc_xfr(dev, cmd, data, true);
			break;

		case MMC_SEND_EXT_CSD:
			LOG_DBG("MMC_SEND_EXT_CSD");
			ret = emmc_send_cmd_data(dev, MMC_SEND_EXT_CSD, cmd, data, true);
			break;

		default:
			ret = emmc_send_cmd_data(dev, cmd->opcode, cmd, data, true);
		}
	} else {
		ret = emmc_send_cmd_no_data(dev, cmd->opcode, cmd);
	}

	return ret;
}

static int emmc_set_io(const struct device *dev, struct sdhc_io *ios)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	struct sdhc_io *host_io = &emmc->host_io;
	int ret;

	LOG_DBG("emmc I/O: DW %d, Clk %d Hz, card power state %s, voltage %s", ios->bus_width,
		ios->clock, ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF",
		ios->signal_voltage == SD_VOL_1_8_V ? "1.8V" : "3.3V");

	if (ios->clock && (ios->clock > emmc->props.f_max || ios->clock < emmc->props.f_min)) {
		LOG_ERR("Invalid clock freq: %d. Supported max:%d and min:%d", ios->clock,
			emmc->props.f_max, emmc->props.f_min);
		return -EINVAL;
	}

	/* Set HC clock */
	if (host_io->clock != ios->clock) {
		LOG_DBG("Clock: %d", host_io->clock);
		if (ios->clock != 0) {
			/* Enable clock */
			LOG_DBG("CLOCK: %d", ios->clock);
			if (!emmc_clock_set(dev, ios->clock)) {
				return -ENOTSUP;
			}
		} else {
			emmc_disable_clock(dev);
		}
		host_io->clock = ios->clock;
	}

	/* Set data bus width */
	if (host_io->bus_width != ios->bus_width) {
		LOG_DBG("bus_width: %d", host_io->bus_width);

		if (ios->bus_width == SDHC_BUS_WIDTH8BIT) {
			SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_EXT_DAT_WIDTH_LOC,
				 EMMC_HOST_CTRL1_EXT_DAT_WIDTH_MASK, 1);
		} else {
			SET_BITS(regs->host_ctrl1, EMMC_HOST_CTRL1_DAT_WIDTH_LOC,
				 EMMC_HOST_CTRL1_DAT_WIDTH_MASK,
				 ios->bus_width == SDHC_BUS_WIDTH4BIT ? 1 : 0);
		}
		host_io->bus_width = ios->bus_width;
	}

	/* Set HC signal voltage */
	if (ios->signal_voltage != host_io->signal_voltage) {
		LOG_DBG("signal_voltage: %d", ios->signal_voltage);
		ret = emmc_set_voltage(dev, ios->signal_voltage);
		if (ret) {
			LOG_ERR("Set signal voltage failed:%d", ret);
			return ret;
		}
		host_io->signal_voltage = ios->signal_voltage;
	}

	/* Set card power */
	if (host_io->power_mode != ios->power_mode) {
		LOG_DBG("power_mode: %d", ios->power_mode);

		ret = emmc_set_power(dev, ios->power_mode);
		if (ret) {
			LOG_ERR("Set bus power failed:%d", ret);
			return ret;
		}
		host_io->power_mode = ios->power_mode;
	}

	/* Set I/O timing */
	if (host_io->timing != ios->timing) {
		LOG_DBG("timing: %d", ios->timing);

		ret = set_timing(dev, ios->timing);
		if (ret) {
			LOG_ERR("Set timing failed:%d", ret);
			return ret;
		}
		host_io->timing = ios->timing;
	}

	return 0;
}

static int emmc_get_card_present(const struct device *dev)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	LOG_DBG("");

	/* Present state bit 16: card inserted */
	emmc->card_present = (bool)((regs->present_state >> 16u) & 1u);

	if (!emmc->card_present) {
		LOG_ERR("No MMC device detected");
	}

	return ((int)emmc->card_present);
}

static int emmc_execute_tuning(const struct device *dev)
{
	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_TUNING)) {
		volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

		LOG_DBG("Tuning starting...");

		regs->host_ctrl2 |= EMMC_HOST_START_TUNING;
		while (!(regs->host_ctrl2 & EMMC_HOST_START_TUNING)) {
			;
		}

		if (regs->host_ctrl2 & EMMC_HOST_TUNING_SUCCESS) {
			LOG_DBG("Tuning completed successfully");
		} else {
			LOG_ERR("Tuning failed");
			return -EIO;
		}
	}
	return 0;
}

static int emmc_card_busy(const struct device *dev)
{
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	LOG_DBG("");

	/* Busy if CMD inhibit, DAT inhibit or DAT line active is set */
	if (regs->present_state & 7u) {
		return 1;
	}

	return 0;
}

static int emmc_get_host_props(const struct device *dev, struct sdhc_host_props *props)
{
	struct emmc_data *emmc = dev->data;
	const struct emmc_config *config = dev->config;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);
	uint64_t cap = regs->capabilities;

	LOG_DBG("");

	memset(props, 0, sizeof(struct sdhc_host_props));
	props->f_max = config->max_bus_freq;
	props->f_min = config->min_bus_freq;
	props->power_delay = config->power_delay_ms;

	props->host_caps.vol_180_support = (bool)(cap & BIT(26u));
	props->host_caps.vol_300_support = (bool)(cap & BIT(25u));
	props->host_caps.vol_330_support = (bool)(cap & BIT(24u));
	props->host_caps.suspend_res_support = false;
	props->host_caps.sdma_support = (bool)(cap & BIT(22u));
	props->host_caps.high_spd_support = (bool)(cap & BIT(21u));
	props->host_caps.adma_2_support = (bool)(cap & BIT(19u));

	props->host_caps.max_blk_len = (cap >> 16u) & 0x3u;
	props->host_caps.ddr50_support = (bool)(cap & BIT(34u));
	props->host_caps.sdr104_support = (bool)(cap & BIT(33u));
	props->host_caps.sdr50_support = (bool)(cap & BIT(32u));
	props->host_caps.bus_8_bit_support = true;
	props->host_caps.bus_4_bit_support = true;
	props->host_caps.hs200_support = (bool)config->hs200_mode;
	props->host_caps.hs400_support = (bool)config->hs400_mode;

	emmc->props = *props;

	return 0;
}

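/*
 * ISR: each pending status bit is acknowledged by writing it back (the
 * status registers are write-1-to-clear) and forwarded to waiting threads
 * via irq_event; error status goes through ERR_INTR_STATUS_EVENT().
 */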
static void emmc_isr(const struct device *dev)
{
	struct emmc_data *emmc = dev->data;
	volatile struct emmc_reg *regs = (struct emmc_reg *)DEVICE_MMIO_GET(dev);

	if (regs->normal_int_stat & EMMC_HOST_CMD_COMPLETE) {
		regs->normal_int_stat |= EMMC_HOST_CMD_COMPLETE;
		k_event_post(&emmc->irq_event, EMMC_HOST_CMD_COMPLETE);
	}

	if (regs->normal_int_stat & EMMC_HOST_XFER_COMPLETE) {
		regs->normal_int_stat |= EMMC_HOST_XFER_COMPLETE;
		k_event_post(&emmc->irq_event, EMMC_HOST_XFER_COMPLETE);
	}

	if (regs->normal_int_stat & EMMC_HOST_DMA_INTR) {
		regs->normal_int_stat |= EMMC_HOST_DMA_INTR;
		k_event_post(&emmc->irq_event, EMMC_HOST_DMA_INTR);
	}

	if (regs->normal_int_stat & EMMC_HOST_BUF_WR_READY) {
		regs->normal_int_stat |= EMMC_HOST_BUF_WR_READY;
		k_event_post(&emmc->irq_event, EMMC_HOST_BUF_WR_READY);
	}

	if (regs->normal_int_stat & EMMC_HOST_BUF_RD_READY) {
		regs->normal_int_stat |= EMMC_HOST_BUF_RD_READY;
		k_event_post(&emmc->irq_event, EMMC_HOST_BUF_RD_READY);
	}

	if (regs->err_int_stat) {
		LOG_ERR("err int:%x", regs->err_int_stat);
		k_event_post(&emmc->irq_event, ERR_INTR_STATUS_EVENT(regs->err_int_stat));
		if (regs->err_int_stat & EMMC_HOST_DMA_TXFR_ERR) {
			regs->err_int_stat |= EMMC_HOST_DMA_TXFR_ERR;
		} else {
			regs->err_int_stat |= regs->err_int_stat;
		}
	}

	if (regs->normal_int_stat) {
		k_event_post(&emmc->irq_event, regs->normal_int_stat);
		regs->normal_int_stat |= regs->normal_int_stat;
	}

	if (regs->adma_err_stat) {
		LOG_ERR("adma err:%x", regs->adma_err_stat);
	}
}

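/*
 * Init: on PCIe instances the BAR is probed and mapped at runtime and
 * memory access plus bus mastering are enabled before use; MMIO instances
 * map the fixed region from devicetree. IRQ wiring happens only when the
 * driver runs in interrupt mode.
 */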
static int emmc_init(const struct device *dev)
{
	struct emmc_data *emmc = dev->data;
	const struct emmc_config *config = dev->config;

	k_sem_init(&emmc->lock, 1, 1);
	k_event_init(&emmc->irq_event);

#if DT_ANY_INST_ON_BUS_STATUS_OKAY(pcie)
	if (config->pcie) {
		struct pcie_bar mbar;

		if (config->pcie->bdf == PCIE_BDF_NONE) {
			LOG_ERR("Cannot probe eMMC PCI device: %x", config->pcie->id);
			return -ENODEV;
		}

		if (!pcie_probe_mbar(config->pcie->bdf, 0, &mbar)) {
			LOG_ERR("eMMC MBAR not found");
			return -EINVAL;
		}

		pcie_get_mbar(config->pcie->bdf, 0, &mbar);
		pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MEM, true);
		device_map(DEVICE_MMIO_RAM_PTR(dev), mbar.phys_addr, mbar.size, K_MEM_CACHE_NONE);
		pcie_set_cmd(config->pcie->bdf, PCIE_CONF_CMDSTAT_MASTER, true);
	} else
#endif
	{
		DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);
	}

	LOG_DBG("MMC Device MMIO: %p", (void *)(struct emmc_reg *)DEVICE_MMIO_GET(dev));

	if (IS_ENABLED(CONFIG_INTEL_EMMC_HOST_INTR)) {
		config->config_func(dev);
	}
	return emmc_reset(dev);
}

static DEVICE_API(sdhc, emmc_api) = {
	.reset = emmc_reset,
	.request = emmc_request,
	.set_io = emmc_set_io,
	.get_card_present = emmc_get_card_present,
	.execute_tuning = emmc_execute_tuning,
	.card_busy = emmc_card_busy,
	.get_host_props = emmc_get_host_props,
};

#define EMMC_HOST_IRQ_FLAGS_SENSE0(n) 0
#define EMMC_HOST_IRQ_FLAGS_SENSE1(n) DT_INST_IRQ(n, sense)
#define EMMC_HOST_IRQ_FLAGS(n) \
	_CONCAT(EMMC_HOST_IRQ_FLAGS_SENSE, DT_INST_IRQ_HAS_CELL(n, sense))(n)

/* Not PCI(e) */
#define EMMC_HOST_IRQ_CONFIG_PCIE0(n) \
	static void emmc_config_##n(const struct device *port) \
	{ \
		ARG_UNUSED(port); \
		IRQ_CONNECT(DT_INST_IRQN(n), DT_INST_IRQ(n, priority), emmc_isr, \
			    DEVICE_DT_INST_GET(n), EMMC_HOST_IRQ_FLAGS(n)); \
		irq_enable(DT_INST_IRQN(n)); \
	}

/* PCI(e) with auto IRQ detection */
#define EMMC_HOST_IRQ_CONFIG_PCIE1(n) \
	static void emmc_config_##n(const struct device *port) \
	{ \
		BUILD_ASSERT(DT_INST_IRQN(n) == PCIE_IRQ_DETECT, \
			     "Only runtime IRQ configuration is supported"); \
		BUILD_ASSERT(IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS), \
			     "eMMC PCI device needs CONFIG_DYNAMIC_INTERRUPTS"); \
		const struct emmc_config *const dev_cfg = port->config; \
		unsigned int irq = pcie_alloc_irq(dev_cfg->pcie->bdf); \
		\
		if (irq == PCIE_CONF_INTR_IRQ_NONE) { \
			return; \
		} \
		pcie_connect_dynamic_irq(dev_cfg->pcie->bdf, irq, DT_INST_IRQ(n, priority), \
					 (void (*)(const void *))emmc_isr, DEVICE_DT_INST_GET(n), \
					 EMMC_HOST_IRQ_FLAGS(n)); \
		pcie_irq_enable(dev_cfg->pcie->bdf, irq); \
	}

#define EMMC_HOST_IRQ_CONFIG(n) _CONCAT(EMMC_HOST_IRQ_CONFIG_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#define INIT_PCIE0(n)
#define INIT_PCIE1(n) DEVICE_PCIE_INST_INIT(n, pcie),
#define INIT_PCIE(n)  _CONCAT(INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#define REG_INIT_PCIE0(n) DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),
#define REG_INIT_PCIE1(n)
#define REG_INIT(n)       _CONCAT(REG_INIT_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#define DEFINE_PCIE0(n)
#define DEFINE_PCIE1(n)          DEVICE_PCIE_INST_DECLARE(n)
#define EMMC_HOST_PCIE_DEFINE(n) _CONCAT(DEFINE_PCIE, DT_INST_ON_BUS(n, pcie))(n)

#define EMMC_HOST_DEV_CFG(n) \
	EMMC_HOST_PCIE_DEFINE(n); \
	EMMC_HOST_IRQ_CONFIG(n); \
	static const struct emmc_config emmc_config_data_##n = { \
		REG_INIT(n) INIT_PCIE(n).config_func = emmc_config_##n, \
		.hs200_mode = DT_INST_PROP_OR(n, mmc_hs200_1_8v, 0), \
		.hs400_mode = DT_INST_PROP_OR(n, mmc_hs400_1_8v, 0), \
		.dw_4bit = DT_INST_ENUM_HAS_VALUE(n, bus_width, 4), \
		.dw_8bit = DT_INST_ENUM_HAS_VALUE(n, bus_width, 8), \
		.max_bus_freq = DT_INST_PROP_OR(n, max_bus_freq, 40000), \
		.min_bus_freq = DT_INST_PROP_OR(n, min_bus_freq, 40000), \
		.power_delay_ms = DT_INST_PROP_OR(n, power_delay_ms, 500), \
	}; \
	\
	static struct emmc_data emmc_priv_data_##n; \
	\
	DEVICE_DT_INST_DEFINE(n, emmc_init, NULL, &emmc_priv_data_##n, &emmc_config_data_##n, \
			      POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEVICE, &emmc_api);

DT_INST_FOREACH_STATUS_OKAY(EMMC_HOST_DEV_CFG)