1 /*
2  * Copyright (c) 2024 Espressif Systems (Shanghai) Co., Ltd.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #define DT_DRV_COMPAT espressif_esp32_sdhc_slot
8 
9 #include <zephyr/kernel.h>
10 #include <zephyr/drivers/sdhc.h>
11 #include <zephyr/drivers/gpio.h>
12 #include <zephyr/logging/log.h>
13 #include <soc.h>
14 #include <zephyr/drivers/pinctrl.h>
15 #include <zephyr/drivers/clock_control.h>
16 #include <zephyr/drivers/interrupt_controller/intc_esp32.h>
17 
18 #include <esp_clk_tree.h>
19 #include <hal/sdmmc_ll.h>
20 #include <esp_intr_alloc.h>
21 #include <esp_timer.h>
22 #include <hal/gpio_hal.h>
23 #include <hal/rtc_io_hal.h>
24 #include <soc/sdmmc_reg.h>
25 #include <esp_memory_utils.h>
26 
27 #include "sdhc_esp32.h"
28 
29 LOG_MODULE_REGISTER(sdhc, CONFIG_SDHC_LOG_LEVEL);
30 
31 #define SDMMC_SLOT_WIDTH_DEFAULT 1
32 
/* Timeouts in microseconds; bodies are parenthesized so the macros expand
 * safely inside larger expressions (e.g. division or multiplication).
 */
#define SDMMC_HOST_CLOCK_UPDATE_CMD_TIMEOUT_US (1000 * 1000)
#define SDMMC_HOST_RESET_TIMEOUT_US            (5000 * 1000)
#define SDMMC_HOST_START_CMD_TIMEOUT_US        (1000 * 1000)
#define SDMMC_HOST_WAIT_EVENT_TIMEOUT_US       (1000 * 1000)
37 
38 #define SDMMC_EVENT_QUEUE_LENGTH 32
39 
40 #define SDMMC_TIMEOUT_MAX 0xFFFFFFFF
41 
42 /* Number of DMA descriptors used for transfer.
43  * Increasing this value above 4 doesn't improve performance for the usual case
44  * of SD memory cards (most data transfers are multiples of 512 bytes).
45  */
46 #define SDMMC_DMA_DESC_CNT 4
47 
48 /* mask for card current state */
49 #define MMC_R1_CURRENT_STATE(resp) (((resp)[0] >> 9) & 0xf)
50 
/* Static, devicetree-derived configuration of one SDHC slot. */
struct sdhc_esp32_config {

	int slot;                                /* SDMMC peripheral slot index (0 or 1) */
	const sdmmc_dev_t *sdio_hw;              /* SDMMC peripheral register block */
	const struct device *clock_dev;          /* clock controller device */
	const clock_control_subsys_t clock_subsys; /* clock subsystem for this peripheral */
	const struct pinctrl_dev_config *pcfg;   /* pinctrl states for the SD bus pins */
	const struct gpio_dt_spec pwr_gpio;      /* optional card power-supply GPIO */
	/*
	 * Pins below are only defined for ESP32. For SoC's with GPIO matrix feature
	 * please use pinctrl for pin configuration.
	 */
	const int clk_pin;
	const int cmd_pin;
	const int d0_pin;
	const int d1_pin;
	const int d2_pin;
	const int d3_pin;

	/* Interrupt configuration (consumed by interrupt allocation code
	 * outside this chunk — presumably esp_intr_alloc; verify at init).
	 */
	int irq_source;
	int irq_priority;
	int irq_flags;
	uint8_t bus_width_cfg; /* bus width from devicetree (assumed 1/4 — TODO confirm) */

	struct sdhc_host_props props; /* host capabilities (f_min/f_max checked in set_io) */
};
77 
/* Runtime state of one SDHC slot. */
struct sdhc_esp32_data {

	uint8_t bus_width;  /* Bus width used by the slot (can change during execution) */
	uint32_t bus_clock; /* Value in Hz. ESP-IDF functions use kHz instead */

	enum sdhc_power power_mode;   /* last power mode applied via set_io */
	enum sdhc_timing_mode timing; /* last timing mode applied via set_io */

	struct host_ctx s_host_ctx;        /* holds the interrupt event queue */
	struct k_mutex s_request_mutex;    /* serialises whole transactions */
	bool s_is_app_cmd;                 /* true if the previous command was SD_APP_CMD */
	sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT]; /* IDMAC descriptor ring */
	struct sdmmc_transfer_state s_cur_transfer;  /* progress of the in-flight transfer */
};
92 
93 /**********************************************************************
94  * ESP32 low level functions
95  **********************************************************************/
96 
97 /* We have two clock divider stages:
98  * - one is the clock generator which drives SDMMC peripheral,
99  *   it can be configured using sdio_hw->clock register. It can generate
100  *   frequencies 160MHz/(N + 1), where 0 < N < 16, I.e. from 10 to 80 MHz.
101  * - 4 clock dividers inside SDMMC peripheral, which can divide clock
102  *   from the first stage by 2 * M, where 0 < M < 255
103  *   (they can also be bypassed).
104  *
105  * For cards which aren't UHS-1 or UHS-2 cards, which we don't support,
106  * maximum bus frequency in high speed (HS) mode is 50 MHz.
107  * Note: for non-UHS-1 cards, HS mode is optional.
108  * Default speed (DS) mode is mandatory, it works up to 25 MHz.
109  * Whether the card supports HS or not can be determined using TRAN_SPEED
110  * field of card's CSD register.
111  *
112  * 50 MHz can not be obtained exactly, closest we can get is 53 MHz.
113  *
114  * The first stage divider is set to the highest possible value for the given
115  * frequency, and the second stage dividers are used if division factor
116  * is >16.
117  *
118  * Of the second stage dividers, div0 is used for card 0, and div1 is used
119  * for card 1.
120  */
/*
 * Program the first-stage (host) clock divider and select the default clock
 * source (see the two-stage divider description above).
 *
 * @param sdio_hw SDMMC peripheral registers
 * @param div     divider value, valid range 2..16
 * @return 0 on success, ESP_ERR_INVALID_ARG on an out-of-range divider
 */
static int sdmmc_host_set_clk_div(sdmmc_dev_t *sdio_hw, int div)
{
	if (div <= 1 || div > 16) {
		LOG_ERR("Invalid parameter 'div'");
		return ESP_ERR_INVALID_ARG;
	}

	sdmmc_ll_set_clock_div(sdio_hw, div);
	sdmmc_ll_select_clk_source(sdio_hw, SDMMC_CLK_SRC_DEFAULT);
	sdmmc_ll_init_phase_delay(sdio_hw);

	/* Give the new divider setting time to take effect */
	esp_rom_delay_us(10);

	return 0;
}
137 
/*
 * Enable the internal DMA controller (IDMAC): reset the bus-mode register
 * and unmask the DMA interrupt summary bits used by the driver.
 */
static void sdmmc_host_dma_init(sdmmc_dev_t *sdio_hw)
{
	sdio_hw->ctrl.dma_enable = 1;
	sdio_hw->bmod.val = 0;      /* clear bus-mode register before reset */
	sdio_hw->bmod.sw_reset = 1; /* software reset of the IDMAC */
	sdio_hw->idinten.ni = 1;    /* normal interrupt summary enable */
	sdio_hw->idinten.ri = 1;    /* receive interrupt enable */
	sdio_hw->idinten.ti = 1;    /* transmit interrupt enable */
}
147 
/*
 * Abort any DMA activity: detach the controller from the internal DMA,
 * request a DMA reset and disable the IDMAC.
 */
static void sdmmc_host_dma_stop(sdmmc_dev_t *sdio_hw)
{
	sdio_hw->ctrl.use_internal_dma = 0;
	sdio_hw->ctrl.dma_reset = 1;
	sdio_hw->bmod.fb = 0;     /* fixed-burst off */
	sdio_hw->bmod.enable = 0; /* IDMAC disabled */
}
155 
sdmmc_host_transaction_handler_init(struct sdhc_esp32_data * data)156 static int sdmmc_host_transaction_handler_init(struct sdhc_esp32_data *data)
157 {
158 	k_mutex_init(&data->s_request_mutex);
159 
160 	data->s_is_app_cmd = false;
161 
162 	return 0;
163 }
164 
sdmmc_host_wait_for_event(struct sdhc_esp32_data * data,int timeout_ms,struct sdmmc_event * out_event)165 static int sdmmc_host_wait_for_event(struct sdhc_esp32_data *data, int timeout_ms,
166 				     struct sdmmc_event *out_event)
167 {
168 	if (!out_event) {
169 		return ESP_ERR_INVALID_ARG;
170 	}
171 
172 	if (!data->s_host_ctx.event_queue) {
173 		return ESP_ERR_INVALID_STATE;
174 	}
175 
176 	int ret = k_msgq_get(data->s_host_ctx.event_queue, out_event, K_MSEC(timeout_ms));
177 
178 	return ret;
179 }
180 
/*
 * Drain and log events that arrived while no transaction was in progress.
 * Returns 0 when the queue is empty, ESP_ERR_TIMEOUT if draining takes
 * longer than SDMMC_HOST_WAIT_EVENT_TIMEOUT_US.
 */
static int handle_idle_state_events(struct sdhc_esp32_data *data)
{
	/* Handle any events which have happened in between transfers.
	 * Under current assumptions (no SDIO support) only card detect events
	 * can happen in the idle state.
	 */
	struct sdmmc_event evt;

	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	/* Non-blocking reads (timeout 0) until the queue is empty */
	while (sdmmc_host_wait_for_event(data, 0, &evt) == 0) {

		if (evt.sdmmc_status & SDMMC_INTMASK_CD) {
			LOG_DBG("card detect event");
			evt.sdmmc_status &= ~SDMMC_INTMASK_CD;
		}

		/* Anything other than card-detect is unexpected here; log it */
		if (evt.sdmmc_status != 0 || evt.dma_status != 0) {
			LOG_DBG("%s unhandled: %08" PRIx32 " %08" PRIx32, __func__,
				evt.sdmmc_status, evt.dma_status);
		}

		/* Loop timeout */
		t1 = esp_timer_get_time();

		if (t1 - t0 > SDMMC_HOST_WAIT_EVENT_TIMEOUT_US) {
			return ESP_ERR_TIMEOUT;
		}

		/* Back off with exponentially increasing yield intervals */
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_sleep(K_MSEC(1));
		}
	}

	return 0;
}
220 
/*
 * Populate up to 'num_desc' IDMAC descriptors from the current transfer
 * state (data->s_cur_transfer). Stops early when the transfer is fully
 * described, when the next descriptor is still owned by the DMA engine
 * (ring full), or when a chunk is neither <4 bytes nor 4-byte aligned.
 */
static void fill_dma_descriptors(struct sdhc_esp32_data *data, size_t num_desc)
{
	for (size_t i = 0; i < num_desc; ++i) {
		if (data->s_cur_transfer.size_remaining == 0) {
			return;
		}

		const size_t next = data->s_cur_transfer.next_desc;
		sdmmc_desc_t *desc = &data->s_dma_desc[next];

		/* Descriptor still owned by the DMA engine: nothing to fill */
		if (desc->owned_by_idmac) {
			return;
		}

		size_t size_to_fill = (data->s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN)
					      ? data->s_cur_transfer.size_remaining
					      : SDMMC_DMA_MAX_BUF_LEN;

		bool last = size_to_fill == data->s_cur_transfer.size_remaining;

		desc->last_descriptor = last;
		desc->second_address_chained = 1;
		desc->owned_by_idmac = 1;
		desc->buffer1_ptr = data->s_cur_transfer.ptr;
		desc->next_desc_ptr =
			(last) ? NULL : &data->s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];

		/* Chunk must be either smaller than a word or word aligned */
		if (!((size_to_fill < 4) || ((size_to_fill % 4) == 0))) {
			return;
		}

		/* Round the DMA buffer size up to a 4-byte boundary */
		desc->buffer1_size = (size_to_fill + 3) & (~3);

		data->s_cur_transfer.size_remaining -= size_to_fill;
		data->s_cur_transfer.ptr += size_to_fill;
		data->s_cur_transfer.next_desc =
			(data->s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;

		/* Cast explicitly: size_t/bitfield arguments must not be
		 * passed to %d (format/argument mismatch is undefined).
		 */
		LOG_DBG("fill %u desc=%u rem=%u next=%u last=%d sz=%u", (unsigned int)num_desc,
			(unsigned int)next, (unsigned int)data->s_cur_transfer.size_remaining,
			(unsigned int)data->s_cur_transfer.next_desc,
			(int)desc->last_descriptor, (unsigned int)desc->buffer1_size);
	}
}
264 
/*
 * Write the IDMAC poll-demand register so the engine re-reads the
 * descriptor list (used to resume after it suspended, or to start a
 * prepared transfer).
 */
static void sdmmc_host_dma_resume(sdmmc_dev_t *sdio_hw)
{
	sdmmc_ll_poll_demand(sdio_hw);
}
269 
/*
 * Program the transfer geometry (total byte count, block size and address
 * of the first descriptor) into the peripheral, then enable DMA and kick
 * the descriptor fetch.
 */
static void sdmmc_host_dma_prepare(sdmmc_dev_t *sdio_hw, sdmmc_desc_t *desc, size_t block_size,
				   size_t data_size)
{
	/* Set size of data and DMA descriptor pointer */
	sdmmc_ll_set_data_transfer_len(sdio_hw, data_size);
	sdmmc_ll_set_block_size(sdio_hw, block_size);
	sdmmc_ll_set_desc_addr(sdio_hw, (uint32_t)desc);

	/* Enable everything needed to use DMA */
	sdmmc_ll_enable_dma(sdio_hw, true);
	sdmmc_host_dma_resume(sdio_hw);
}
282 
/*
 * Write a command and its argument to the hardware and start execution.
 *
 * Returns ESP_ERR_INVALID_ARG for a bad slot, ESP_ERR_NOT_FOUND when no
 * card is detected, ESP_ERR_INVALID_STATE when attempting a data write to
 * a write-protected card, and ESP_ERR_TIMEOUT when a previously issued
 * command never releases the start_command bit.
 */
static int sdmmc_host_start_command(sdmmc_dev_t *sdio_hw, int slot, sdmmc_hw_cmd_t cmd,
				    uint32_t arg)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}
	if (!sdmmc_ll_is_card_detected(sdio_hw, slot)) {
		return ESP_ERR_NOT_FOUND;
	}
	if (cmd.data_expected && cmd.rw && sdmmc_ll_is_card_write_protected(sdio_hw, slot)) {
		return ESP_ERR_INVALID_STATE;
	}
	/* Outputs should be synchronized to cclk_out */
	cmd.use_hold_reg = 1;

	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	/* Wait until the previous command (if any) has been accepted */
	while (sdio_hw->cmd.start_command == 1) {
		t1 = esp_timer_get_time();

		if (t1 - t0 > SDMMC_HOST_START_CMD_TIMEOUT_US) {
			return ESP_ERR_TIMEOUT;
		}
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_sleep(K_MSEC(1));
		}
	}

	/* Argument must be written before the command register; writing the
	 * command register with start_command set triggers execution.
	 */
	sdio_hw->cmdarg = arg;
	cmd.card_num = slot;
	cmd.start_command = 1;
	sdio_hw->cmd = cmd;

	return ESP_OK;
}
321 
/*
 * Copy the command response from the peripheral registers into 'cmd' and
 * translate response-phase interrupt error bits into cmd->error
 * (timeout, response CRC, response error). On error, any in-flight DMA
 * transfer for this command is stopped.
 */
static void process_command_response(sdmmc_dev_t *sdio_hw, uint32_t status,
				     struct sdmmc_command *cmd)
{
	if (cmd->flags & SCF_RSP_PRESENT) {
		if (cmd->flags & SCF_RSP_136) {
			/* Destination is 4-byte aligned, can memcopy from peripheral registers */
			memcpy(cmd->response, (uint32_t *)sdio_hw->resp, 4 * sizeof(uint32_t));
		} else {
			/* Short (48-bit) response occupies a single register */
			cmd->response[0] = sdio_hw->resp[0];
			cmd->response[1] = 0;
			cmd->response[2] = 0;
			cmd->response[3] = 0;
		}
	}

	int err = ESP_OK;

	if (status & SDMMC_INTMASK_RTO) {
		/* response timeout is only possible when response is expected */
		if (!(cmd->flags & SCF_RSP_PRESENT)) {
			return;
		}

		err = ESP_ERR_TIMEOUT;
	} else if ((cmd->flags & SCF_RSP_CRC) && (status & SDMMC_INTMASK_RCRC)) {
		err = ESP_ERR_INVALID_CRC;
	} else if (status & SDMMC_INTMASK_RESP_ERR) {
		err = ESP_ERR_INVALID_RESPONSE;
	}

	if (err != ESP_OK) {
		cmd->error = err;
		if (cmd->data) {
			/* Data phase cannot complete after a command error */
			sdmmc_host_dma_stop(sdio_hw);
		}
		LOG_DBG("%s: error 0x%x  (status=%08" PRIx32 ")", __func__, err, status);
	}
}
360 
/*
 * Translate data-phase interrupt error bits into cmd->error: data timeout,
 * data CRC error, or end-bit error (treated as timeout for write
 * transfers). Resets the FIFO on any data error and stops DMA when the
 * command carried data.
 */
static void process_data_status(sdmmc_dev_t *sdio_hw, uint32_t status, struct sdmmc_command *cmd)
{
	if (status & SDMMC_DATA_ERR_MASK) {
		if (status & SDMMC_INTMASK_DTO) {
			cmd->error = ESP_ERR_TIMEOUT;
		} else if (status & SDMMC_INTMASK_DCRC) {
			cmd->error = ESP_ERR_INVALID_CRC;
		} else if ((status & SDMMC_INTMASK_EBE) && (cmd->flags & SCF_CMD_READ) == 0) {
			/* end-bit error on a write transfer */
			cmd->error = ESP_ERR_TIMEOUT;
		} else {
			cmd->error = ESP_FAIL;
		}
		/* Discard whatever is left in the FIFO after a data error */
		sdio_hw->ctrl.fifo_reset = 1;
	}

	if (cmd->error != 0) {
		if (cmd->data) {
			sdmmc_host_dma_stop(sdio_hw);
		}
		LOG_DBG("%s: error 0x%x (status=%08" PRIx32 ")", __func__, cmd->error, status);
	}
}
383 
/*
 * Test whether any bit selected by 'mask' is set in '*state'. The masked
 * bits are always cleared from '*state', whether or not they were set.
 *
 * @return true when at least one masked bit was set
 */
static inline bool mask_check_and_clear(uint32_t *state, uint32_t mask)
{
	const uint32_t hit = *state & mask;

	*state ^= hit; /* clears exactly the masked bits that were set */

	return hit != 0;
}
392 
/*
 * Count how many descriptors, starting at the current position in the
 * ring, have been released by the IDMAC and can be refilled. Stops at the
 * first descriptor still owned by the engine or at the end of the chain.
 */
static size_t get_free_descriptors_count(struct sdhc_esp32_data *data)
{
	const size_t next = data->s_cur_transfer.next_desc;
	size_t count = 0;

	/* Starting with the current DMA descriptor, count the number of
	 * descriptors which have 'owned_by_idmac' set to 0. These are the
	 * descriptors already processed by the DMA engine.
	 */
	for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
		sdmmc_desc_t *desc = &data->s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];

		if (desc->owned_by_idmac) {
			break;
		}
		++count;
		if (desc->next_desc_ptr == NULL) {
			/* final descriptor in the chain */
			break;
		}
	}

	return count;
}
417 
/*
 * Transfer state machine. Feeds one interrupt event (command/DMA status
 * bits) into the current request state and advances through
 * SENDING_CMD -> SENDING_DATA -> BUSY -> IDLE as bits are consumed.
 * Bits not handled by the current state are returned in
 * 'unhandled_events' so the caller can merge them into the next event.
 * Always returns ESP_OK; per-command errors are reported via cmd->error.
 */
static int process_events(const struct device *dev, struct sdmmc_event evt,
			  struct sdmmc_command *cmd, enum sdmmc_req_state *pstate,
			  struct sdmmc_event *unhandled_events)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;

	const char *const s_state_names[]
		__attribute__((unused)) = {"IDLE", "SENDING_CMD", "SENDIND_DATA", "BUSY"};
	/* Keep the original status bits: handlers consume bits from 'evt'
	 * but several checks need the unmodified view.
	 */
	struct sdmmc_event orig_evt = evt;

	LOG_DBG("%s: state=%s evt=%" PRIx32 " dma=%" PRIx32, __func__, s_state_names[*pstate],
		evt.sdmmc_status, evt.dma_status);

	enum sdmmc_req_state next_state = *pstate;
	enum sdmmc_req_state state = (enum sdmmc_req_state) -1;

	/* Re-run the switch until the state stops changing, so one event can
	 * drive multiple transitions (e.g. CMD_DONE followed by data phase).
	 */
	while (next_state != state) {

		state = next_state;

		switch (state) {

		case SDMMC_IDLE:
			break;

		case SDMMC_SENDING_CMD:
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_CMD_ERR_MASK)) {
				process_command_response(sdio_hw, orig_evt.sdmmc_status, cmd);
				/*
				 * In addition to the error interrupt, CMD_DONE will also be
				 * reported. It may occur immediately (in the same sdmmc_event_t) or
				 * be delayed until the next interrupt
				 */
			}
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_CMD_DONE)) {
				process_command_response(sdio_hw, orig_evt.sdmmc_status, cmd);
				if (cmd->error != ESP_OK) {
					next_state = SDMMC_IDLE;
					break;
				}

				if (cmd->data == NULL) {
					next_state = SDMMC_IDLE;
				} else {
					next_state = SDMMC_SENDING_DATA;
				}
			}
			break;

		case SDMMC_SENDING_DATA:
			if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_DATA_ERR_MASK)) {
				process_data_status(sdio_hw, orig_evt.sdmmc_status, cmd);
				sdmmc_host_dma_stop(sdio_hw);
			}
			if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {

				data->s_cur_transfer.desc_remaining--;

				/* Refill released descriptors while data remains */
				if (data->s_cur_transfer.size_remaining) {

					int desc_to_fill = get_free_descriptors_count(data);

					fill_dma_descriptors(data, desc_to_fill);
					sdmmc_host_dma_resume(sdio_hw);
				}
				if (data->s_cur_transfer.desc_remaining == 0) {
					next_state = SDMMC_BUSY;
				}
			}
			if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
				/* On start bit error, DATA_DONE interrupt will not be generated */
				next_state = SDMMC_IDLE;
				break;
			}
			break;

		case SDMMC_BUSY:
			if (!mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_DATA_OVER)) {
				break;
			}
			process_data_status(sdio_hw, orig_evt.sdmmc_status, cmd);
			next_state = SDMMC_IDLE;
			break;
		}
		LOG_DBG("%s state=%s next_state=%s", __func__, s_state_names[state],
			s_state_names[next_state]);
	}

	*pstate = state;
	*unhandled_events = evt;

	return ESP_OK;
}
513 
/*
 * Wait (up to cmd->timeout_ms) for the next interrupt event, merge it with
 * any bits left unhandled by the previous round, and feed the result into
 * the state machine (process_events). On wait timeout (-EAGAIN) the DMA is
 * stopped and the error is returned to the caller.
 */
static int handle_event(const struct device *dev, struct sdmmc_command *cmd,
			enum sdmmc_req_state *state, struct sdmmc_event *unhandled_events)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	struct sdmmc_event event;

	int err = sdmmc_host_wait_for_event(data, cmd->timeout_ms, &event);

	if (err != 0) {
		LOG_ERR("sdmmc_handle_event: sdmmc_host_wait_for_event returned 0x%x, timeout %d "
			"ms",
			err, cmd->timeout_ms);
		if (err == -EAGAIN) {
			/* Queue wait timed out: abort any DMA in progress */
			sdmmc_host_dma_stop(sdio_hw);
		}
		return err;
	}

	LOG_DBG("sdmmc_handle_event: event %08" PRIx32 " %08" PRIx32 ", unhandled %08" PRIx32
		" %08" PRIx32,
		event.sdmmc_status, event.dma_status, unhandled_events->sdmmc_status,
		unhandled_events->dma_status);

	/* Carry over bits the state machine could not consume last time */
	event.sdmmc_status |= unhandled_events->sdmmc_status;
	event.dma_status |= unhandled_events->dma_status;

	process_events(dev, event, cmd, state, unhandled_events);
	LOG_DBG("sdmmc_handle_event: events unhandled: %08" PRIx32 " %08" PRIx32,
		unhandled_events->sdmmc_status, unhandled_events->dma_status);

	return ESP_OK;
}
548 
wait_for_busy_cleared(const sdmmc_dev_t * sdio_hw,uint32_t timeout_ms)549 static bool wait_for_busy_cleared(const sdmmc_dev_t *sdio_hw, uint32_t timeout_ms)
550 {
551 	if (timeout_ms == 0) {
552 		return !(sdio_hw->status.data_busy == 1);
553 	}
554 
555 	/* It would have been nice to do this without polling, however the peripheral
556 	 * can only generate Busy Clear Interrupt for data write commands, and waiting
557 	 * for busy clear is mostly needed for other commands such as MMC_SWITCH.
558 	 */
559 	uint32_t timeout_ticks = k_ms_to_ticks_ceil32(timeout_ms);
560 
561 	while (timeout_ticks-- > 0) {
562 		if (!(sdio_hw->status.data_busy == 1)) {
563 			return true;
564 		}
565 		k_sleep(K_MSEC(1));
566 	}
567 
568 	return false;
569 }
570 
cmd_needs_auto_stop(const struct sdmmc_command * cmd)571 static bool cmd_needs_auto_stop(const struct sdmmc_command *cmd)
572 {
573 	/* SDMMC host needs an "auto stop" flag for the following commands: */
574 	return cmd->datalen > 0 &&
575 	       (cmd->opcode == SD_WRITE_MULTIPLE_BLOCK || cmd->opcode == SD_READ_MULTIPLE_BLOCK);
576 }
577 
make_hw_cmd(struct sdmmc_command * cmd)578 static sdmmc_hw_cmd_t make_hw_cmd(struct sdmmc_command *cmd)
579 {
580 	sdmmc_hw_cmd_t res = {0};
581 
582 	res.cmd_index = cmd->opcode;
583 	if (cmd->opcode == SD_STOP_TRANSMISSION) {
584 		res.stop_abort_cmd = 1;
585 	} else if (cmd->opcode == SD_GO_IDLE_STATE) {
586 		res.send_init = 1;
587 	} else {
588 		res.wait_complete = 1;
589 	}
590 	if (cmd->opcode == SD_GO_IDLE_STATE) {
591 		res.send_init = 1;
592 	}
593 	if (cmd->flags & SCF_RSP_PRESENT) {
594 		res.response_expect = 1;
595 		if (cmd->flags & SCF_RSP_136) {
596 			res.response_long = 1;
597 		}
598 	}
599 	if (cmd->flags & SCF_RSP_CRC) {
600 		res.check_response_crc = 1;
601 	}
602 	if (cmd->data) {
603 		res.data_expected = 1;
604 
605 		if ((cmd->flags & SCF_CMD_READ) == 0) {
606 			res.rw = 1;
607 		}
608 
609 		if ((cmd->datalen % cmd->blklen) != 0) {
610 			return res; /* Error situation, data will be invalid */
611 		}
612 
613 		res.send_auto_stop = cmd_needs_auto_stop(cmd) ? 1 : 0;
614 	}
615 	LOG_DBG("%s: opcode=%d, rexp=%d, crc=%d, auto_stop=%d", __func__, res.cmd_index,
616 		res.response_expect, res.check_response_crc, res.send_auto_stop);
617 
618 	return res;
619 }
620 
/*
 * Execute a complete SD/MMC transaction: drain stale events, set up the
 * DMA descriptor ring when the command carries data, issue the command,
 * then run the event state machine until it returns to idle. Transactions
 * are serialised with s_request_mutex. Returns an ESP_xxx error code.
 */
static int sdmmc_host_do_transaction(const struct device *dev, int slot,
				     struct sdmmc_command *cmdinfo)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	struct sdhc_esp32_data *data = dev->data;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
	int ret;

	if (k_mutex_lock(&data->s_request_mutex, K_FOREVER) != 0) {
		return ESP_ERR_NO_MEM;
	}

	/* dispose of any events which happened asynchronously */
	handle_idle_state_events(data);

	/* convert cmdinfo to hardware register value */
	sdmmc_hw_cmd_t hw_cmd = make_hw_cmd(cmdinfo);

	if (cmdinfo->data) {
		/* Length should be either <4 or >=4 and =0 (mod 4) */
		if ((cmdinfo->datalen >= 4) && (cmdinfo->datalen % 4) != 0) {
			LOG_DBG("%s: invalid size: total=%d", __func__, cmdinfo->datalen);
			ret = ESP_ERR_INVALID_SIZE;
			goto out;
		}

		/* Buffer must be word-aligned and reachable by the IDMAC */
		if ((((intptr_t)cmdinfo->data % 4) != 0) || !esp_ptr_dma_capable(cmdinfo->data)) {
			LOG_DBG("%s: buffer %p can not be used for DMA", __func__, cmdinfo->data);
			ret = ESP_ERR_INVALID_ARG;
			goto out;
		}

		/* this clears "owned by IDMAC" bits */
		memset(data->s_dma_desc, 0, sizeof(data->s_dma_desc));

		/* initialize first descriptor */
		data->s_dma_desc[0].first_descriptor = 1;

		/* save transfer info */
		data->s_cur_transfer.ptr = (uint8_t *)cmdinfo->data;
		data->s_cur_transfer.size_remaining = cmdinfo->datalen;
		data->s_cur_transfer.next_desc = 0;
		data->s_cur_transfer.desc_remaining =
			(cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;

		/* prepare descriptors */
		fill_dma_descriptors(data, SDMMC_DMA_DESC_CNT);

		/* write transfer info into hardware */
		sdmmc_host_dma_prepare(sdio_hw, &data->s_dma_desc[0], cmdinfo->blklen,
				       cmdinfo->datalen);
	}

	/* write command into hardware, this also sends the command to the card */
	ret = sdmmc_host_start_command(sdio_hw, slot, hw_cmd, cmdinfo->arg);

	if (ret != ESP_OK) {
		goto out;
	}

	/* process events until transfer is complete */
	cmdinfo->error = ESP_OK;

	enum sdmmc_req_state state = SDMMC_SENDING_CMD;
	struct sdmmc_event unhandled_events = {0};

	while (state != SDMMC_IDLE) {
		ret = handle_event(dev, cmdinfo, &state, &unhandled_events);
		if (ret != 0) {
			break;
		}
	}

	/* Optionally wait for the card to release the busy (DAT0) line */
	if (ret == 0 && (cmdinfo->flags & SCF_WAIT_BUSY)) {
		if (!wait_for_busy_cleared(sdio_hw, cmdinfo->timeout_ms)) {
			ret = ESP_ERR_TIMEOUT;
		}
	}

	/* Remember whether the next command is an application command (ACMD) */
	data->s_is_app_cmd = (ret == ESP_OK && cmdinfo->opcode == SD_APP_CMD);

out:

	k_mutex_unlock(&data->s_request_mutex);

	return ret;
}
708 
/*
 * Send the CIU "clock update" pseudo-command (it only latches the new
 * clock register settings, nothing is sent to the card) and wait for the
 * controller to accept it. HLE errors are cleared and the command retried,
 * as the manual requires. Returns ESP_ERR_TIMEOUT if never accepted.
 */
static int sdmmc_host_clock_update_command(sdmmc_dev_t *sdio_hw, int slot)
{
	int ret;
	bool repeat = true;

	/* Clock update command (not a real command; just updates CIU registers) */
	sdmmc_hw_cmd_t cmd_val = {.card_num = slot, .update_clk_reg = 1, .wait_complete = 1};

	while (repeat) {

		ret = sdmmc_host_start_command(sdio_hw, slot, cmd_val, 0);

		if (ret != 0) {
			return ret;
		}

		int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
		int64_t t0 = esp_timer_get_time();
		int64_t t1 = 0;

		while (true) {
			t1 = esp_timer_get_time();

			if (t1 - t0 > SDMMC_HOST_CLOCK_UPDATE_CMD_TIMEOUT_US) {
				return ESP_ERR_TIMEOUT;
			}
			/* Sending clock update command to the CIU can generate HLE error */
			/* According to the manual, this is okay and we must retry the command */
			if (sdio_hw->rintsts.hle) {
				sdio_hw->rintsts.hle = 1; /* write 1 to clear the HLE bit */
				repeat = true;
				break;
			}
			/* When the command is accepted by CIU, start_command bit will be */
			/* cleared in sdio_hw->cmd register */
			if (sdio_hw->cmd.start_command == 0) {
				repeat = false;
				break;
			}
			/* Back off with exponentially increasing yield intervals */
			if (t1 - t0 > yield_delay_us) {
				yield_delay_us *= 2;
				k_sleep(K_MSEC(1));
			}
		}
	}

	return 0;
}
757 
/*
 * Derive the two-stage clock divider settings for a requested card
 * frequency in kHz (see the divider description above): *host_div is the
 * first-stage clock-generator divider, *card_div is half of the per-card
 * second-stage divider (0 = bypass). Assumes a 160 MHz source clock.
 */
void sdmmc_host_get_clk_dividers(uint32_t freq_khz, int *host_div, int *card_div)
{
	uint32_t clk_src_freq_hz = 0;

	esp_clk_tree_src_get_freq_hz(SDMMC_CLK_SRC_DEFAULT, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED,
				     &clk_src_freq_hz);
	assert(clk_src_freq_hz == (160 * 1000 * 1000));

	/* Calculate new dividers */
	if (freq_khz >= SDMMC_FREQ_HIGHSPEED) {
		*host_div = 4; /* 160 MHz / 4 = 40 MHz */
		*card_div = 0;
	} else if (freq_khz == SDMMC_FREQ_DEFAULT) {
		*host_div = 8; /* 160 MHz / 8 = 20 MHz */
		*card_div = 0;
	} else if (freq_khz == SDMMC_FREQ_PROBING) {
		*host_div = 10; /* 160 MHz / 10 / (20 * 2) = 400 kHz */
		*card_div = 20;
	} else {
		/*
		 * for custom frequencies use maximum range of host divider (1-16), find the closest
		 * <= div. combination if exceeded, combine with the card divider to keep reasonable
		 * precision (applies mainly to low frequencies) effective frequency range: 400 kHz
		 * - 32 MHz (32.1 - 39.9 MHz cannot be covered with given divider scheme)
		 */
		*host_div = (clk_src_freq_hz) / (freq_khz * 1000);
		if (*host_div > 15) {
			/* first stage exhausted: fix it at 2 and round the
			 * second-stage divider up so the result is <= target
			 */
			*host_div = 2;
			*card_div = (clk_src_freq_hz / 2) / (2 * freq_khz * 1000);
			if (((clk_src_freq_hz / 2) % (2 * freq_khz * 1000)) > 0) {
				(*card_div)++;
			}
		} else if ((clk_src_freq_hz % (freq_khz * 1000)) > 0) {
			/* round up so the resulting frequency is <= target */
			(*host_div)++;
		}
	}
}
795 
sdmmc_host_calc_freq(const int host_div,const int card_div)796 static int sdmmc_host_calc_freq(const int host_div, const int card_div)
797 {
798 	uint32_t clk_src_freq_hz = 0;
799 
800 	esp_clk_tree_src_get_freq_hz(SDMMC_CLK_SRC_DEFAULT, ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED,
801 				     &clk_src_freq_hz);
802 	assert(clk_src_freq_hz == (160 * 1000 * 1000));
803 
804 	return clk_src_freq_hz / host_div / ((card_div == 0) ? 1 : card_div * 2) / 1000;
805 }
806 
/*
 * Set the card clock for 'slot' to approximately 'freq_khz', following the
 * controller's required sequence: disable the card clock, program both
 * divider stages, then re-enable the clock — each step confirmed with a
 * CIU clock-update command. Also programs data/response timeouts for the
 * new frequency. Returns 0 on success or an ESP_xxx error.
 */
int sdmmc_host_set_card_clk(sdmmc_dev_t *sdio_hw, int slot, uint32_t freq_khz)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}

	/* Disable clock first */
	sdmmc_ll_enable_card_clock(sdio_hw, slot, false);
	int err = sdmmc_host_clock_update_command(sdio_hw, slot);

	if (err != 0) {
		LOG_ERR("disabling clk failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	int host_div = 0; /* clock divider of the host (sdio_hw->clock) */
	int card_div = 0; /* 1/2 of card clock divider (sdio_hw->clkdiv) */

	sdmmc_host_get_clk_dividers(freq_khz, &host_div, &card_div);

	int real_freq = sdmmc_host_calc_freq(host_div, card_div);

	LOG_DBG("slot=%d host_div=%d card_div=%d freq=%dkHz (max %" PRIu32 "kHz)", slot, host_div,
		card_div, real_freq, freq_khz);

	/* Program card clock settings, send them to the CIU */
	sdmmc_ll_set_card_clock_div(sdio_hw, slot, card_div);
	err = sdmmc_host_set_clk_div(sdio_hw, host_div);

	if (err != 0) {
		return err;
	}

	err = sdmmc_host_clock_update_command(sdio_hw, slot);

	if (err != 0) {
		LOG_ERR("setting clk div failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	/* Re-enable clocks */
	sdmmc_ll_enable_card_clock(sdio_hw, slot, true);
	sdmmc_ll_enable_card_clock_low_power(sdio_hw, slot, true);

	err = sdmmc_host_clock_update_command(sdio_hw, slot);

	if (err != 0) {
		LOG_ERR("re-enabling clk failed");
		LOG_ERR("%s: sdmmc_host_clock_update_command returned 0x%x", __func__, err);
		return err;
	}

	/* set data timeout */
	const uint32_t data_timeout_ms = 100;
	uint32_t data_timeout_cycles = data_timeout_ms * freq_khz;

	sdmmc_ll_set_data_timeout(sdio_hw, data_timeout_cycles);
	/* always set response timeout to highest value, it's small enough anyway */
	sdmmc_ll_set_response_timeout(sdio_hw, 255);

	return 0;
}
871 
/*
 * Configure the data bus width (1 or 4 lines) for the given slot. 8-bit
 * width is not supported by this driver. Returns ESP_OK on success, or
 * ESP_ERR_INVALID_ARG for a bad slot or width.
 */
int sdmmc_host_set_bus_width(sdmmc_dev_t *sdio_hw, int slot, size_t width)
{
	if (!(slot == 0 || slot == 1)) {
		return ESP_ERR_INVALID_ARG;
	}

	/* The ctype register holds one bit per slot in each width field */
	const uint16_t mask = BIT(slot);

	if (width == 1) {
		sdio_hw->ctype.card_width_8 &= ~mask;
		sdio_hw->ctype.card_width &= ~mask;
	} else if (width == 4) {
		sdio_hw->ctype.card_width_8 &= ~mask;
		sdio_hw->ctype.card_width |= mask;
	} else {
		return ESP_ERR_INVALID_ARG;
	}

	/* 'width' is size_t: cast for %u instead of passing it to %d */
	LOG_DBG("slot=%d width=%u", slot, (unsigned int)width);
	return ESP_OK;
}
893 
/*
 * Route one SD bus pin through the IO MUX (used on ESP32, which has no
 * GPIO matrix routing for SDMMC): enable the internal pull-up, disable the
 * pull-down, select the SDMMC IO MUX function, enable input and set the
 * strongest drive strength.
 */
static void configure_pin_iomux(int gpio_num)
{
	const int sdmmc_func = SDMMC_LL_IOMUX_FUNC;
	const int drive_strength = 3; /* strongest drive setting */

	if (gpio_num == GPIO_NUM_NC) {
		return; /* parameter check*/
	}

	/* Pull resistors on these pads are controlled via the RTC IO block */
	int rtc_num = rtc_io_num_map[gpio_num];

	rtcio_hal_pulldown_disable(rtc_num);
	rtcio_hal_pullup_enable(rtc_num);

	uint32_t reg = GPIO_PIN_MUX_REG[gpio_num];

	PIN_INPUT_ENABLE(reg);
	gpio_hal_iomux_func_sel(reg, sdmmc_func);
	PIN_SET_DRV(reg, drive_strength);
}
914 
915 /**********************************************************************
916  * Zephyr API
917  **********************************************************************/
918 
/*
 * Reset the ESP32 SDMMC host controller
 */
/*
 * Assert the controller, DMA and FIFO reset bits and wait for the hardware
 * to clear them. Returns 0 on success or -ETIMEDOUT if the bits are not
 * cleared within SDMMC_HOST_RESET_TIMEOUT_US.
 */
static int sdhc_esp32_reset(const struct device *dev)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;

	/* Set reset bits */
	sdio_hw->ctrl.controller_reset = 1;
	sdio_hw->ctrl.dma_reset = 1;
	sdio_hw->ctrl.fifo_reset = 1;

	/* Wait for the reset bits to be cleared by hardware */
	int64_t yield_delay_us = 100 * 1000; /* initially 100ms */
	int64_t t0 = esp_timer_get_time();
	int64_t t1 = 0;

	while (sdio_hw->ctrl.controller_reset || sdio_hw->ctrl.fifo_reset ||
	       sdio_hw->ctrl.dma_reset) {
		t1 = esp_timer_get_time();

		if (t1 - t0 > SDMMC_HOST_RESET_TIMEOUT_US) {
			return -ETIMEDOUT;
		}

		/* Back off with exponentially increasing intervals */
		if (t1 - t0 > yield_delay_us) {
			yield_delay_us *= 2;
			k_busy_wait(1);
		}
	}

	/* Reset carried out successfully */
	return 0;
}
954 
955 /*
956  * Set SDHC io properties
957  */
sdhc_esp32_set_io(const struct device * dev,struct sdhc_io * ios)958 static int sdhc_esp32_set_io(const struct device *dev, struct sdhc_io *ios)
959 {
960 	const struct sdhc_esp32_config *cfg = dev->config;
961 	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
962 	struct sdhc_esp32_data *data = dev->data;
963 	uint8_t bus_width;
964 	int ret = 0;
965 
966 	LOG_INF("SDHC I/O: slot: %d, bus width %d, clock %dHz, card power %s, voltage %s",
967 		cfg->slot, ios->bus_width, ios->clock,
968 		ios->power_mode == SDHC_POWER_ON ? "ON" : "OFF",
969 		ios->signal_voltage == SD_VOL_1_8_V ? "1.8V" : "3.3V");
970 
971 	if (ios->clock) {
972 		/* Check for frequency boundaries supported by host */
973 		if (ios->clock > cfg->props.f_max || ios->clock < cfg->props.f_min) {
974 			LOG_ERR("Proposed clock outside supported host range");
975 			return -EINVAL;
976 		}
977 
978 		if (data->bus_clock != (uint32_t)ios->clock) {
979 			/* Try setting new clock */
980 			ret = sdmmc_host_set_card_clk(sdio_hw, cfg->slot, (ios->clock / 1000));
981 
982 			if (ret == 0) {
983 				LOG_INF("Bus clock successfully set to %d kHz", ios->clock / 1000);
984 			} else {
985 				LOG_ERR("Error configuring card clock");
986 				return err_esp2zep(ret);
987 			}
988 
989 			data->bus_clock = (uint32_t)ios->clock;
990 		}
991 	}
992 
993 	if (ios->bus_width > 0) {
994 		/* Set bus width */
995 		switch (ios->bus_width) {
996 		case SDHC_BUS_WIDTH1BIT:
997 			bus_width = 1;
998 			break;
999 		case SDHC_BUS_WIDTH4BIT:
1000 			bus_width = 4;
1001 			break;
1002 		default:
1003 			return -ENOTSUP;
1004 		}
1005 
1006 		if (data->bus_width != bus_width) {
1007 			ret = sdmmc_host_set_bus_width(sdio_hw, cfg->slot, bus_width);
1008 
1009 			if (ret == 0) {
1010 				LOG_INF("Bus width set successfully to %d bit", bus_width);
1011 			} else {
1012 				LOG_ERR("Error configuring bus width");
1013 				return err_esp2zep(ret);
1014 			}
1015 
1016 			data->bus_width = bus_width;
1017 		}
1018 	}
1019 
1020 	/* Toggle card power supply */
1021 	if ((data->power_mode != ios->power_mode) && (cfg->pwr_gpio.port)) {
1022 		if (ios->power_mode == SDHC_POWER_OFF) {
1023 			gpio_pin_set_dt(&cfg->pwr_gpio, 0);
1024 		} else if (ios->power_mode == SDHC_POWER_ON) {
1025 			gpio_pin_set_dt(&cfg->pwr_gpio, 1);
1026 		}
1027 		data->power_mode = ios->power_mode;
1028 	}
1029 
1030 	if (ios->timing > 0) {
1031 		/* Set I/O timing */
1032 		if (data->timing != ios->timing) {
1033 			switch (ios->timing) {
1034 			case SDHC_TIMING_LEGACY:
1035 			case SDHC_TIMING_HS:
1036 				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, false);
1037 				break;
1038 			case SDHC_TIMING_DDR50:
1039 			case SDHC_TIMING_DDR52:
1040 				/* Enable DDR mode */
1041 				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, true);
1042 				LOG_INF("DDR mode enabled");
1043 				break;
1044 			case SDHC_TIMING_SDR12:
1045 			case SDHC_TIMING_SDR25:
1046 				sdmmc_ll_enable_ddr_mode(sdio_hw, cfg->slot, false);
1047 				break;
1048 			case SDHC_TIMING_SDR50:
1049 			case SDHC_TIMING_HS400:
1050 			case SDHC_TIMING_SDR104:
1051 			case SDHC_TIMING_HS200:
1052 			default:
1053 				LOG_ERR("Timing mode not supported for this device");
1054 				ret = -ENOTSUP;
1055 				break;
1056 			}
1057 
1058 			LOG_INF("Bus timing successfully changed to %s", timingStr[ios->timing]);
1059 			data->timing = ios->timing;
1060 		}
1061 	}
1062 
1063 	return ret;
1064 }
1065 
1066 /*
1067  * Return 0 if card is not busy, 1 if it is
1068  */
sdhc_esp32_card_busy(const struct device * dev)1069 static int sdhc_esp32_card_busy(const struct device *dev)
1070 {
1071 	const struct sdhc_esp32_config *cfg = dev->config;
1072 	const sdmmc_dev_t *sdio_hw = cfg->sdio_hw;
1073 
1074 	return (sdio_hw->status.data_busy == 1);
1075 }
1076 
1077 /*
1078  * Send CMD or CMD/DATA via SDHC
1079  */
/*
 * Send a command (optionally with a data phase) to the card.
 *
 * Translates the Zephyr sdhc_command/sdhc_data pair into an ESP
 * sdmmc_command, fills in the SCF_* flags the ESP transaction layer
 * expects for each supported opcode, runs the transaction (retrying up
 * to cmd->retries extra times on failure) and copies the response back.
 *
 * Returns 0 on success, -ENOTSUP for an opcode not handled below, or a
 * converted error code when the transaction fails.
 */
static int sdhc_esp32_request(const struct device *dev, struct sdhc_command *cmd,
			      struct sdhc_data *data)
{
	const struct sdhc_esp32_config *cfg = dev->config;
	int retries = (int)(cmd->retries + 1); /* first try plus retries */
	uint32_t timeout_cfg = 0;
	int ret_esp = 0;
	int ret = 0;

	/* convert command structures Zephyr vs ESP */
	struct sdmmc_command esp_cmd = {
		.opcode = cmd->opcode,
		.arg = cmd->arg,
	};

	if (data) {
		/* Data phase: total transfer length is blocks * block_size */
		esp_cmd.data = data->data;
		esp_cmd.blklen = data->block_size;
		esp_cmd.datalen = (data->blocks * data->block_size);
		esp_cmd.buflen = esp_cmd.datalen;
		timeout_cfg = data->timeout_ms;
	} else {
		timeout_cfg = cmd->timeout_ms;
	}

	/* setting timeout according to command type */
	if (cmd->timeout_ms == SDHC_TIMEOUT_FOREVER) {
		esp_cmd.timeout_ms = SDMMC_TIMEOUT_MAX;
	} else {
		esp_cmd.timeout_ms = timeout_cfg;
	}

	/*
	 * Handle flags and arguments with ESP32 specifics.
	 * SCF_CMD_* selects the command class (broadcast, addressed, data
	 * transfer); SCF_RSP_R* selects the expected response format.
	 */
	switch (cmd->opcode) {
	case SD_GO_IDLE_STATE:
		esp_cmd.flags = SCF_CMD_BC | SCF_RSP_R0;
		break;

	case SD_APP_CMD:
	case SD_SEND_STATUS:
	case SD_SET_BLOCK_SIZE:
		esp_cmd.flags = SCF_CMD_AC | SCF_RSP_R1;
		break;

	case SD_SEND_IF_COND:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R7;
		break;

	case SD_APP_SEND_OP_COND:
		/* Advertise SDHC capacity support and the full voltage window */
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R3;
		esp_cmd.arg = SD_OCR_SDHC_CAP | SD_OCR_VOL_MASK;
		break;

	case SDIO_RW_DIRECT:
		esp_cmd.flags = SCF_CMD_AC | SCF_RSP_R5;
		break;

	case SDIO_SEND_OP_COND:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R4;
		break;

	case SD_ALL_SEND_CID:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R2;
		break;

	case SD_SEND_RELATIVE_ADDR:
		esp_cmd.flags = SCF_CMD_BCR | SCF_RSP_R6;
		break;

	case SD_SEND_CSD:
		/* CSD arrives in the (long) R2 response, not via a data phase */
		esp_cmd.flags = SCF_CMD_AC | SCF_RSP_R2;
		esp_cmd.datalen = 0;
		break;

	case SD_SELECT_CARD:
		/* Don't expect to see a response when de-selecting a card */
		esp_cmd.flags = SCF_CMD_AC | (cmd->arg > 0 ? SCF_RSP_R1 : 0);
		break;

	case SD_APP_SEND_SCR:
	case SD_SWITCH:
	case SD_READ_SINGLE_BLOCK:
	case SD_READ_MULTIPLE_BLOCK:
	case SD_APP_SEND_NUM_WRITTEN_BLK:
		/* Card-to-host data transfer commands */
		esp_cmd.flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1;
		break;

	case SD_WRITE_SINGLE_BLOCK:
	case SD_WRITE_MULTIPLE_BLOCK:
		/* Host-to-card data transfer commands */
		esp_cmd.flags = SCF_CMD_ADTC | SCF_RSP_R1;
		break;

	default:
		LOG_INF("SDHC driver: command %u not supported", cmd->opcode);
		return -ENOTSUP;
	}

	/* Run the transaction, retrying on error until attempts are exhausted */
	while (retries > 0) {

		ret_esp = sdmmc_host_do_transaction(dev, cfg->slot, &esp_cmd);

		if (ret_esp) {
			retries--; /* error, retry */
		} else {
			break;
		}
	}

	if ((ret_esp != 0) || esp_cmd.error) {
		LOG_DBG("Error command: %u arg %08x ret_esp = 0x%x error = 0x%x\n",
			cmd->opcode, cmd->arg, ret_esp, esp_cmd.error);

		/* Prefer the transaction-level error; fall back to the command error */
		ret_esp = (ret_esp > 0) ? ret_esp : esp_cmd.error;

		ret = err_esp2zep(ret_esp);
	} else {
		/* fill response buffer */
		memcpy(cmd->response, esp_cmd.response, sizeof(cmd->response));

		int state = MMC_R1_CURRENT_STATE(esp_cmd.response);

		LOG_DBG("cmd %u arg %08x response %08x %08x %08x %08x err=0x%x state=%d",
			esp_cmd.opcode, esp_cmd.arg, esp_cmd.response[0], esp_cmd.response[1],
			esp_cmd.response[2], esp_cmd.response[3], esp_cmd.error, state);

		if (data) {
			/* Record number of bytes xfered */
			data->bytes_xfered = esp_cmd.datalen;
		}
	}

	return ret;
}
1215 
1216 /*
1217  * Get card presence
1218  */
sdhc_esp32_get_card_present(const struct device * dev)1219 static int sdhc_esp32_get_card_present(const struct device *dev)
1220 {
1221 	const struct sdhc_esp32_config *cfg = dev->config;
1222 	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
1223 
1224 	return sdmmc_ll_is_card_detected(sdio_hw, cfg->slot);
1225 }
1226 
1227 /*
1228  * Get host properties
1229  */
sdhc_esp32_get_host_props(const struct device * dev,struct sdhc_host_props * props)1230 static int sdhc_esp32_get_host_props(const struct device *dev, struct sdhc_host_props *props)
1231 {
1232 	const struct sdhc_esp32_config *cfg = dev->config;
1233 
1234 	memcpy(props, &cfg->props, sizeof(struct sdhc_host_props));
1235 	return 0;
1236 }
1237 
1238 /**
1239  * @brief SDMMC interrupt handler
1240  *
1241  * All communication in SD protocol is driven by the master, and the hardware
1242  * handles things like stop commands automatically.
1243  * So the interrupt handler doesn't need to do much, we just push interrupt
1244  * status into a queue, clear interrupt flags, and let the task currently
1245  * doing communication figure out what to do next.
1246  *
1247  * Card detect interrupts pose a small issue though, because if a card is
1248  * plugged in and out a few times, while there is no task to process
1249  * the events, event queue can become full and some card detect events
 * may be dropped. We ignore this problem for now, since there are no other
1251  * interesting events which can get lost due to this.
1252  */
sdio_esp32_isr(void * arg)1253 static void IRAM_ATTR sdio_esp32_isr(void *arg)
1254 {
1255 	const struct device *dev = (const struct device *)arg;
1256 	const struct sdhc_esp32_config *cfg = dev->config;
1257 	struct sdhc_esp32_data *data = dev->data;
1258 	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
1259 
1260 	struct sdmmc_event event;
1261 	struct k_msgq *queue = data->s_host_ctx.event_queue;
1262 	uint32_t pending = sdmmc_ll_get_intr_status(sdio_hw) & 0xFFFF;
1263 
1264 	sdio_hw->rintsts.val = pending;
1265 	event.sdmmc_status = pending;
1266 
1267 	uint32_t dma_pending = sdio_hw->idsts.val;
1268 
1269 	sdio_hw->idsts.val = dma_pending;
1270 	event.dma_status = dma_pending & 0x1f;
1271 
1272 	if ((pending != 0) || (dma_pending != 0)) {
1273 		k_msgq_put(queue, &event, K_NO_WAIT);
1274 	}
1275 }
1276 
1277 /*
1278  * Perform early system init for SDHC
1279  */
sdhc_esp32_init(const struct device * dev)1280 static int sdhc_esp32_init(const struct device *dev)
1281 {
1282 	const struct sdhc_esp32_config *cfg = dev->config;
1283 	struct sdhc_esp32_data *data = dev->data;
1284 	sdmmc_dev_t *sdio_hw = (sdmmc_dev_t *)cfg->sdio_hw;
1285 	int ret;
1286 
1287 	/* Pin configuration */
1288 
1289 	/* Set power GPIO high, so card starts powered */
1290 	if (cfg->pwr_gpio.port) {
1291 		ret = gpio_pin_configure_dt(&cfg->pwr_gpio, GPIO_OUTPUT_ACTIVE);
1292 
1293 		if (ret) {
1294 			return -EIO;
1295 		}
1296 	}
1297 
1298 	/*
1299 	 * Pins below are only defined for ESP32. For SoC's with GPIO matrix feature
1300 	 * please use pinctrl for pin configuration.
1301 	 */
1302 	configure_pin_iomux(cfg->clk_pin);
1303 	configure_pin_iomux(cfg->cmd_pin);
1304 	configure_pin_iomux(cfg->d0_pin);
1305 	configure_pin_iomux(cfg->d1_pin);
1306 	configure_pin_iomux(cfg->d2_pin);
1307 	configure_pin_iomux(cfg->d3_pin);
1308 
1309 	ret = pinctrl_apply_state(cfg->pcfg, PINCTRL_STATE_DEFAULT);
1310 
1311 	if (ret < 0) {
1312 		LOG_ERR("Failed to configure SDHC pins");
1313 		return -EINVAL;
1314 	}
1315 
1316 	if (!device_is_ready(cfg->clock_dev)) {
1317 		return -ENODEV;
1318 	}
1319 
1320 	ret = clock_control_on(cfg->clock_dev, cfg->clock_subsys);
1321 
1322 	if (ret != 0) {
1323 		LOG_ERR("Error enabling SDHC clock");
1324 		return ret;
1325 	}
1326 
1327 	/* Enable clock to peripheral. Use smallest divider first */
1328 	ret = sdmmc_host_set_clk_div(sdio_hw, 2);
1329 
1330 	if (ret != 0) {
1331 		return err_esp2zep(ret);
1332 	}
1333 
1334 	/* Reset controller */
1335 	sdhc_esp32_reset(dev);
1336 
1337 	/* Clear interrupt status and set interrupt mask to known state */
1338 	sdio_hw->rintsts.val = 0xffffffff;
1339 	sdio_hw->intmask.val = 0;
1340 	sdio_hw->ctrl.int_enable = 0;
1341 
1342 	/* Attach interrupt handler */
1343 	ret = esp_intr_alloc(cfg->irq_source,
1344 				ESP_PRIO_TO_FLAGS(cfg->irq_priority) |
1345 				ESP_INT_FLAGS_CHECK(cfg->irq_flags) | ESP_INTR_FLAG_IRAM,
1346 				&sdio_esp32_isr, (void *)dev,
1347 				&data->s_host_ctx.intr_handle);
1348 
1349 	if (ret != 0) {
1350 		k_msgq_purge(data->s_host_ctx.event_queue);
1351 		return -EFAULT;
1352 	}
1353 
1354 	/* Enable interrupts */
1355 	sdio_hw->intmask.val = SDMMC_INTMASK_CD | SDMMC_INTMASK_CMD_DONE | SDMMC_INTMASK_DATA_OVER |
1356 			       SDMMC_INTMASK_RCRC | SDMMC_INTMASK_DCRC | SDMMC_INTMASK_RTO |
1357 			       SDMMC_INTMASK_DTO | SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE |
1358 			       SDMMC_INTMASK_EBE | SDMMC_INTMASK_RESP_ERR |
1359 			       SDMMC_INTMASK_HLE; /* sdio is enabled only when use */
1360 
1361 	sdio_hw->ctrl.int_enable = 1;
1362 
1363 	/* Disable generation of Busy Clear Interrupt */
1364 	sdio_hw->cardthrctl.busy_clr_int_en = 0;
1365 
1366 	/* Enable DMA */
1367 	sdmmc_host_dma_init(sdio_hw);
1368 
1369 	/* Initialize transaction handler */
1370 	ret = sdmmc_host_transaction_handler_init(data);
1371 
1372 	if (ret != 0) {
1373 		k_msgq_purge(data->s_host_ctx.event_queue);
1374 		esp_intr_free(data->s_host_ctx.intr_handle);
1375 		data->s_host_ctx.intr_handle = NULL;
1376 
1377 		return ret;
1378 	}
1379 
1380 	/* post init settings */
1381 	ret = sdmmc_host_set_card_clk(sdio_hw, cfg->slot, data->bus_clock / 1000);
1382 
1383 	if (ret != 0) {
1384 		LOG_ERR("Error configuring card clock");
1385 		return err_esp2zep(ret);
1386 	}
1387 
1388 	ret = sdmmc_host_set_bus_width(sdio_hw, cfg->slot, data->bus_width);
1389 
1390 	if (ret != 0) {
1391 		LOG_ERR("Error configuring bus width");
1392 		return err_esp2zep(ret);
1393 	}
1394 
1395 	return 0;
1396 }
1397 
/* Zephyr SDHC driver API vtable for the ESP32 SDMMC host */
static DEVICE_API(sdhc, sdhc_api) = {
	.reset = sdhc_esp32_reset,
	.request = sdhc_esp32_request,
	.set_io = sdhc_esp32_set_io,
	.get_card_present = sdhc_esp32_get_card_present,
	.card_busy = sdhc_esp32_card_busy,
	.get_host_props = sdhc_esp32_get_host_props,
};
1406 
/*
 * Per-instance driver instantiation from devicetree.
 *
 * Defines, for slot instance n: the interrupt event message queue, the
 * read-only config (register base, clock/IRQ info from the parent SDMMC
 * node, pins, and the static host-capability table) and the mutable
 * driver data (initial bus width/clock/power/timing), then registers the
 * device with the SDHC API vtable.
 */
#define SDHC_ESP32_INIT(n)                                                                         \
                                                                                                   \
	PINCTRL_DT_DEFINE(DT_DRV_INST(n));                                                         \
	K_MSGQ_DEFINE(sdhc##n##_queue, sizeof(struct sdmmc_event), SDMMC_EVENT_QUEUE_LENGTH, 1);   \
                                                                                                   \
	static const struct sdhc_esp32_config sdhc_esp32_##n##_config = {                          \
		.sdio_hw = (const sdmmc_dev_t *)DT_REG_ADDR(DT_INST_PARENT(n)),                    \
		.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))),                     \
		.clock_subsys = (clock_control_subsys_t)DT_CLOCKS_CELL(DT_INST_PARENT(n), offset), \
		.irq_source = DT_IRQ_BY_IDX(DT_INST_PARENT(n), 0, irq),                            \
		.irq_priority = DT_IRQ_BY_IDX(DT_INST_PARENT(n), 0, priority),                     \
		.irq_flags = DT_IRQ_BY_IDX(DT_INST_PARENT(n), 0, flags),                           \
		.slot = DT_REG_ADDR(DT_DRV_INST(n)),                                               \
		.bus_width_cfg = DT_INST_PROP(n, bus_width),                                       \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(DT_DRV_INST(n)),                                 \
		.pwr_gpio = GPIO_DT_SPEC_INST_GET_OR(n, pwr_gpios, {0}),                           \
		.clk_pin = DT_INST_PROP_OR(n, clk_pin, GPIO_NUM_NC),                               \
		.cmd_pin = DT_INST_PROP_OR(n, cmd_pin, GPIO_NUM_NC),                               \
		.d0_pin = DT_INST_PROP_OR(n, d0_pin, GPIO_NUM_NC),                                 \
		.d1_pin = DT_INST_PROP_OR(n, d1_pin, GPIO_NUM_NC),                                 \
		.d2_pin = DT_INST_PROP_OR(n, d2_pin, GPIO_NUM_NC),                                 \
		.d3_pin = DT_INST_PROP_OR(n, d3_pin, GPIO_NUM_NC),                                 \
		.props = {.is_spi = false,                                                         \
			  .f_max = DT_INST_PROP(n, max_bus_freq),                                  \
			  .f_min = DT_INST_PROP(n, min_bus_freq),                                  \
			  .max_current_330 = DT_INST_PROP(n, max_current_330),                     \
			  .max_current_180 = DT_INST_PROP(n, max_current_180),                     \
			  .power_delay = DT_INST_PROP_OR(n, power_delay_ms, 0),                    \
			  .host_caps = {.vol_180_support = false,                                  \
					.vol_300_support = false,                                  \
					.vol_330_support = true,                                   \
					.suspend_res_support = false,                              \
					.sdma_support = true,                                      \
					.high_spd_support =                                        \
						(DT_INST_PROP(n, bus_width) == 4) ? true : false,  \
					.adma_2_support = false,                                   \
					.max_blk_len = 0,                                          \
					.ddr50_support = false,                                    \
					.sdr104_support = false,                                   \
					.sdr50_support = false,                                    \
					.bus_8_bit_support = false,                                \
					.bus_4_bit_support =                                       \
						(DT_INST_PROP(n, bus_width) == 4) ? true : false,  \
					.hs200_support = false,                                    \
					.hs400_support = false}}};                                 \
                                                                                                   \
	static struct sdhc_esp32_data sdhc_esp32_##n##_data = {                                    \
		.bus_width = SDMMC_SLOT_WIDTH_DEFAULT,                                             \
		.bus_clock = (SDMMC_FREQ_PROBING * 1000),                                          \
		.power_mode = SDHC_POWER_ON,                                                       \
		.timing = SDHC_TIMING_LEGACY,                                                      \
		.s_host_ctx = {.event_queue = &sdhc##n##_queue}};                                  \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, &sdhc_esp32_init, NULL, &sdhc_esp32_##n##_data,                   \
			      &sdhc_esp32_##n##_config, POST_KERNEL, CONFIG_SDHC_INIT_PRIORITY,    \
			      &sdhc_api);
1463 
/* Instantiate the driver for every enabled devicetree slot node */
DT_INST_FOREACH_STATUS_OKAY(SDHC_ESP32_INIT)

BUILD_ASSERT(DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT) == 1,
	     "Currently, only one espressif,esp32-sdhc-slot compatible node is supported");
1468