/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include "esp_err.h"
#include "esp_log.h"
#include "esp_pm.h"
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"
#include "freertos/task.h"
#include "soc/sdmmc_periph.h"
#include "soc/soc_memory_layout.h"
#include "driver/sdmmc_types.h"
#include "driver/sdmmc_defs.h"
#include "driver/sdmmc_host.h"
#include "sdmmc_private.h"


/* Number of DMA descriptors used for transfer.
 * Increasing this value above 4 doesn't improve performance for the usual case
 * of SD memory cards (most data transfers are multiples of 512 bytes).
 */
#define SDMMC_DMA_DESC_CNT  4
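
/* For illustration: assuming SDMMC_DMA_MAX_BUF_LEN is 4096 bytes (see
 * sdmmc_private.h), a single 512-byte block fits in one descriptor, and a
 * 16 kB transfer needs 16384 / 4096 = 4 descriptors. Longer transfers are
 * handled by refilling descriptors that the IDMAC has already processed;
 * see fill_dma_descriptors() and process_events() below.
 */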

static const char* TAG = "sdmmc_req";

typedef enum {
    SDMMC_IDLE,
    SDMMC_SENDING_CMD,
    SDMMC_SENDING_DATA,
    SDMMC_BUSY,
} sdmmc_req_state_t;
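
/* Typical progression for a command with data, as driven by process_events():
 * SDMMC_SENDING_CMD -> (CMD_DONE) -> SDMMC_SENDING_DATA -> (DMA done) ->
 * SDMMC_BUSY -> (DATA_OVER) -> SDMMC_IDLE. Commands without data go straight
 * from SDMMC_SENDING_CMD to SDMMC_IDLE.
 */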

typedef struct {
    uint8_t* ptr;
    size_t size_remaining;
    size_t next_desc;
    size_t desc_remaining;
} sdmmc_transfer_state_t;
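
/* Worked example (again assuming 4096-byte DMA buffers): a 10240-byte
 * transfer starts out with size_remaining = 10240, next_desc = 0 and
 * desc_remaining = 3; each serviced descriptor decrements desc_remaining,
 * and the data phase is considered finished once it reaches zero.
 */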

const uint32_t SDMMC_DATA_ERR_MASK =
        SDMMC_INTMASK_DTO | SDMMC_INTMASK_DCRC |
        SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE  |
        SDMMC_INTMASK_EBE;

const uint32_t SDMMC_DMA_DONE_MASK =
        SDMMC_IDMAC_INTMASK_RI | SDMMC_IDMAC_INTMASK_TI |
        SDMMC_IDMAC_INTMASK_NI;

const uint32_t SDMMC_CMD_ERR_MASK =
        SDMMC_INTMASK_RTO |
        SDMMC_INTMASK_RCRC |
        SDMMC_INTMASK_RESP_ERR;

static sdmmc_desc_t s_dma_desc[SDMMC_DMA_DESC_CNT];
static sdmmc_transfer_state_t s_cur_transfer = { 0 };
static QueueHandle_t s_request_mutex;
static bool s_is_app_cmd;   // This flag is set if the next command is an APP command
#ifdef CONFIG_PM_ENABLE
static esp_pm_lock_handle_t s_pm_lock;
#endif

static esp_err_t handle_idle_state_events(void);
static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd);
static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
        sdmmc_event_t* unhandled_events);
static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
        sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events);
static void process_command_response(uint32_t status, sdmmc_command_t* cmd);
static void fill_dma_descriptors(size_t num_desc);
static size_t get_free_descriptors_count(void);
static bool wait_for_busy_cleared(int timeout_ms);

esp_err_t sdmmc_host_transaction_handler_init(void)
{
    assert(s_request_mutex == NULL);
    s_request_mutex = xSemaphoreCreateMutex();
    if (!s_request_mutex) {
        return ESP_ERR_NO_MEM;
    }
    s_is_app_cmd = false;
#ifdef CONFIG_PM_ENABLE
    esp_err_t err = esp_pm_lock_create(ESP_PM_APB_FREQ_MAX, 0, "sdmmc", &s_pm_lock);
    if (err != ESP_OK) {
        vSemaphoreDelete(s_request_mutex);
        s_request_mutex = NULL;
        return err;
    }
#endif
    return ESP_OK;
}

void sdmmc_host_transaction_handler_deinit(void)
{
    assert(s_request_mutex);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_delete(s_pm_lock);
    s_pm_lock = NULL;
#endif
    vSemaphoreDelete(s_request_mutex);
    s_request_mutex = NULL;
}
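
/* Usage sketch (illustrative, not part of this file): a single-block read
 * using the constants from sdmmc_defs.h. The buffer must be word-aligned and
 * DMA-capable, matching the checks performed below; block_addr and slot are
 * hypothetical variables.
 *
 *     uint32_t buf[512 / sizeof(uint32_t)];
 *     sdmmc_command_t cmd = {
 *         .opcode = MMC_READ_BLOCK_SINGLE,
 *         .arg = block_addr,
 *         .flags = SCF_CMD_READ | SCF_RSP_R1,
 *         .data = buf,
 *         .datalen = 512,
 *         .blklen = 512,
 *         .timeout_ms = 1000,
 *     };
 *     esp_err_t err = sdmmc_host_do_transaction(slot, &cmd);
 */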
esp_err_t sdmmc_host_do_transaction(int slot, sdmmc_command_t* cmdinfo)
{
    esp_err_t ret;
    xSemaphoreTake(s_request_mutex, portMAX_DELAY);
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_acquire(s_pm_lock);
#endif
    // dispose of any events which happened asynchronously
    handle_idle_state_events();
    // convert cmdinfo to hardware register value
    sdmmc_hw_cmd_t hw_cmd = make_hw_cmd(cmdinfo);
    if (cmdinfo->data) {
        // Data length must be either less than 4 bytes, or a multiple of 4 bytes.
        if (cmdinfo->datalen >= 4 && cmdinfo->datalen % 4 != 0) {
            ESP_LOGD(TAG, "%s: invalid size: total=%d",
                    __func__, cmdinfo->datalen);
            ret = ESP_ERR_INVALID_SIZE;
            goto out;
        }
        if ((intptr_t) cmdinfo->data % 4 != 0 ||
                !esp_ptr_dma_capable(cmdinfo->data)) {
            ESP_LOGD(TAG, "%s: buffer %p can not be used for DMA", __func__, cmdinfo->data);
            ret = ESP_ERR_INVALID_ARG;
            goto out;
        }
        // this clears "owned by IDMAC" bits
        memset(s_dma_desc, 0, sizeof(s_dma_desc));
        // initialize first descriptor
        s_dma_desc[0].first_descriptor = 1;
        // save transfer info
        s_cur_transfer.ptr = (uint8_t*) cmdinfo->data;
        s_cur_transfer.size_remaining = cmdinfo->datalen;
        s_cur_transfer.next_desc = 0;
        s_cur_transfer.desc_remaining = (cmdinfo->datalen + SDMMC_DMA_MAX_BUF_LEN - 1) / SDMMC_DMA_MAX_BUF_LEN;
        // prepare descriptors
        fill_dma_descriptors(SDMMC_DMA_DESC_CNT);
        // write transfer info into hardware
        sdmmc_host_dma_prepare(&s_dma_desc[0], cmdinfo->blklen, cmdinfo->datalen);
    }
    // write command into hardware; this also sends the command to the card
    ret = sdmmc_host_start_command(slot, hw_cmd, cmdinfo->arg);
    if (ret != ESP_OK) {
        goto out;
    }
    // process events until transfer is complete
    cmdinfo->error = ESP_OK;
    sdmmc_req_state_t state = SDMMC_SENDING_CMD;
    sdmmc_event_t unhandled_events = { 0 };
    while (state != SDMMC_IDLE) {
        ret = handle_event(cmdinfo, &state, &unhandled_events);
        if (ret != ESP_OK) {
            break;
        }
    }
    if (ret == ESP_OK && (cmdinfo->flags & SCF_WAIT_BUSY)) {
        if (!wait_for_busy_cleared(cmdinfo->timeout_ms)) {
            ret = ESP_ERR_TIMEOUT;
        }
    }
    s_is_app_cmd = (ret == ESP_OK && cmdinfo->opcode == MMC_APP_CMD);

out:
#ifdef CONFIG_PM_ENABLE
    esp_pm_lock_release(s_pm_lock);
#endif
    xSemaphoreGive(s_request_mutex);
    return ret;
}

static size_t get_free_descriptors_count(void)
{
    const size_t next = s_cur_transfer.next_desc;
    size_t count = 0;
    /* Starting with the current DMA descriptor, count the number of
     * descriptors which have 'owned_by_idmac' set to 0. These are the
     * descriptors already processed by the DMA engine.
     */
    for (size_t i = 0; i < SDMMC_DMA_DESC_CNT; ++i) {
        sdmmc_desc_t* desc = &s_dma_desc[(next + i) % SDMMC_DMA_DESC_CNT];
        if (desc->owned_by_idmac) {
            break;
        }
        ++count;
        if (desc->next_desc_ptr == NULL) {
            /* final descriptor in the chain */
            break;
        }
    }
    return count;
}

static void fill_dma_descriptors(size_t num_desc)
{
    for (size_t i = 0; i < num_desc; ++i) {
        if (s_cur_transfer.size_remaining == 0) {
            return;
        }
        const size_t next = s_cur_transfer.next_desc;
        sdmmc_desc_t* desc = &s_dma_desc[next];
        assert(!desc->owned_by_idmac);
        size_t size_to_fill =
            (s_cur_transfer.size_remaining < SDMMC_DMA_MAX_BUF_LEN) ?
                s_cur_transfer.size_remaining : SDMMC_DMA_MAX_BUF_LEN;
        bool last = size_to_fill == s_cur_transfer.size_remaining;
        desc->last_descriptor = last;
        desc->second_address_chained = 1;
        desc->owned_by_idmac = 1;
        desc->buffer1_ptr = s_cur_transfer.ptr;
        desc->next_desc_ptr = (last) ? NULL : &s_dma_desc[(next + 1) % SDMMC_DMA_DESC_CNT];
        assert(size_to_fill < 4 || size_to_fill % 4 == 0);
        desc->buffer1_size = (size_to_fill + 3) & (~3);

        s_cur_transfer.size_remaining -= size_to_fill;
        s_cur_transfer.ptr += size_to_fill;
        s_cur_transfer.next_desc = (s_cur_transfer.next_desc + 1) % SDMMC_DMA_DESC_CNT;
        ESP_LOGV(TAG, "fill %d desc=%d rem=%d next=%d last=%d sz=%d",
                num_desc, next, s_cur_transfer.size_remaining,
                s_cur_transfer.next_desc, desc->last_descriptor, desc->buffer1_size);
    }
}
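
/* E.g. with a 20 kB transfer and 4 kB DMA buffers, the initial call from
 * sdmmc_host_do_transaction() fills all four descriptors; the fifth chunk is
 * filled in later from process_events(), once the IDMAC has released a
 * descriptor (as counted by get_free_descriptors_count() above).
 */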

static esp_err_t handle_idle_state_events(void)
{
    /* Handle any events which have happened in between transfers.
     * Under current assumptions (no SDIO support) only card detect events
     * can happen in the idle state.
     */
    sdmmc_event_t evt;
    while (sdmmc_host_wait_for_event(0, &evt) == ESP_OK) {
        if (evt.sdmmc_status & SDMMC_INTMASK_CD) {
            ESP_LOGV(TAG, "card detect event");
            evt.sdmmc_status &= ~SDMMC_INTMASK_CD;
        }
        if (evt.sdmmc_status != 0 || evt.dma_status != 0) {
            ESP_LOGE(TAG, "handle_idle_state_events unhandled: %08x %08x",
                    evt.sdmmc_status, evt.dma_status);
        }
    }
    return ESP_OK;
}

static esp_err_t handle_event(sdmmc_command_t* cmd, sdmmc_req_state_t* state,
        sdmmc_event_t* unhandled_events)
{
    sdmmc_event_t event;
    esp_err_t err = sdmmc_host_wait_for_event(cmd->timeout_ms / portTICK_PERIOD_MS, &event);
    if (err != ESP_OK) {
        ESP_LOGE(TAG, "sdmmc_host_wait_for_event returned 0x%x", err);
        if (err == ESP_ERR_TIMEOUT) {
            sdmmc_host_dma_stop();
        }
        return err;
    }
    ESP_LOGV(TAG, "sdmmc_handle_event: event %08x %08x, unhandled %08x %08x",
            event.sdmmc_status, event.dma_status,
            unhandled_events->sdmmc_status, unhandled_events->dma_status);
    event.sdmmc_status |= unhandled_events->sdmmc_status;
    event.dma_status |= unhandled_events->dma_status;
    process_events(event, cmd, state, unhandled_events);
    ESP_LOGV(TAG, "sdmmc_handle_event: events unhandled: %08x %08x",
            unhandled_events->sdmmc_status, unhandled_events->dma_status);
    return ESP_OK;
}

static bool cmd_needs_auto_stop(const sdmmc_command_t* cmd)
{
    /* SDMMC host needs an "auto stop" flag for the following commands: */
    return cmd->datalen > 0 &&
           (cmd->opcode == MMC_WRITE_BLOCK_MULTIPLE ||
            cmd->opcode == MMC_READ_BLOCK_MULTIPLE ||
            cmd->opcode == MMC_WRITE_DAT_UNTIL_STOP ||
            cmd->opcode == MMC_READ_DAT_UNTIL_STOP);
}
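
/* For example, a multi-block write (MMC_WRITE_BLOCK_MULTIPLE, CMD25) gets the
 * send_auto_stop bit set, so the controller issues MMC_STOP_TRANSMISSION
 * (CMD12) by itself after the last block.
 */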

static sdmmc_hw_cmd_t make_hw_cmd(sdmmc_command_t* cmd)
{
    sdmmc_hw_cmd_t res = { 0 };

    res.cmd_index = cmd->opcode;
    if (cmd->opcode == MMC_STOP_TRANSMISSION) {
        res.stop_abort_cmd = 1;
    } else if (cmd->opcode == MMC_GO_IDLE_STATE) {
        res.send_init = 1;
    } else {
        res.wait_complete = 1;
    }
    if (cmd->flags & SCF_RSP_PRESENT) {
        res.response_expect = 1;
        if (cmd->flags & SCF_RSP_136) {
            res.response_long = 1;
        }
    }
    if (cmd->flags & SCF_RSP_CRC) {
        res.check_response_crc = 1;
    }
    res.use_hold_reg = 1;
    if (cmd->data) {
        res.data_expected = 1;
        if ((cmd->flags & SCF_CMD_READ) == 0) {
            res.rw = 1;
        }
        assert(cmd->datalen % cmd->blklen == 0);
        res.send_auto_stop = cmd_needs_auto_stop(cmd) ? 1 : 0;
    }
    ESP_LOGV(TAG, "%s: opcode=%d, rexp=%d, crc=%d, auto_stop=%d", __func__,
            res.cmd_index, res.response_expect, res.check_response_crc,
            res.send_auto_stop);
    return res;
}

static void process_command_response(uint32_t status, sdmmc_command_t* cmd)
{
    if (cmd->flags & SCF_RSP_PRESENT) {
        if (cmd->flags & SCF_RSP_136) {
            /* Destination is 4-byte aligned, can memcpy from peripheral registers */
            memcpy(cmd->response, (uint32_t*) SDMMC.resp, 4 * sizeof(uint32_t));
        } else {
            cmd->response[0] = SDMMC.resp[0];
            cmd->response[1] = 0;
            cmd->response[2] = 0;
            cmd->response[3] = 0;
        }
    }
    esp_err_t err = ESP_OK;
    if (status & SDMMC_INTMASK_RTO) {
        // response timeout is only possible when a response is expected
        assert(cmd->flags & SCF_RSP_PRESENT);
        err = ESP_ERR_TIMEOUT;
    } else if ((cmd->flags & SCF_RSP_CRC) && (status & SDMMC_INTMASK_RCRC)) {
        err = ESP_ERR_INVALID_CRC;
    } else if (status & SDMMC_INTMASK_RESP_ERR) {
        err = ESP_ERR_INVALID_RESPONSE;
    }
    if (err != ESP_OK) {
        cmd->error = err;
        if (cmd->data) {
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x (status=%08x)", __func__, err, status);
    }
}

static void process_data_status(uint32_t status, sdmmc_command_t* cmd)
{
    if (status & SDMMC_DATA_ERR_MASK) {
        if (status & SDMMC_INTMASK_DTO) {
            cmd->error = ESP_ERR_TIMEOUT;
        } else if (status & SDMMC_INTMASK_DCRC) {
            cmd->error = ESP_ERR_INVALID_CRC;
        } else if ((status & SDMMC_INTMASK_EBE) &&
                (cmd->flags & SCF_CMD_READ) == 0) {
            cmd->error = ESP_ERR_TIMEOUT;
        } else {
            cmd->error = ESP_FAIL;
        }
        SDMMC.ctrl.fifo_reset = 1;
    }
    if (cmd->error != 0) {
        if (cmd->data) {
            sdmmc_host_dma_stop();
        }
        ESP_LOGD(TAG, "%s: error 0x%x (status=%08x)", __func__, cmd->error, status);
    }
}

static inline bool mask_check_and_clear(uint32_t* state, uint32_t mask)
{
    bool ret = ((*state) & mask) != 0;
    *state &= ~mask;
    return ret;
}

static esp_err_t process_events(sdmmc_event_t evt, sdmmc_command_t* cmd,
        sdmmc_req_state_t* pstate, sdmmc_event_t* unhandled_events)
{
    const char* const s_state_names[] __attribute__((unused)) = {
        "IDLE",
        "SENDING_CMD",
        "SENDING_DATA",
        "BUSY"
    };
    sdmmc_event_t orig_evt = evt;
    ESP_LOGV(TAG, "%s: state=%s evt=%x dma=%x", __func__, s_state_names[*pstate],
            evt.sdmmc_status, evt.dma_status);
    sdmmc_req_state_t next_state = *pstate;
    sdmmc_req_state_t state = (sdmmc_req_state_t) -1;
    while (next_state != state) {
        state = next_state;
        switch (state) {
            case SDMMC_IDLE:
                break;

            case SDMMC_SENDING_CMD:
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_CMD_ERR_MASK)) {
                    process_command_response(orig_evt.sdmmc_status, cmd);
                    break;      // Need to wait for the CMD_DONE interrupt
                }
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_CMD_DONE)) {
                    process_command_response(orig_evt.sdmmc_status, cmd);
                    if (cmd->error != ESP_OK) {
                        next_state = SDMMC_IDLE;
                        break;
                    }

                    if (cmd->data == NULL) {
                        next_state = SDMMC_IDLE;
                    } else {
                        next_state = SDMMC_SENDING_DATA;
                    }
                }
                break;

            case SDMMC_SENDING_DATA:
                if (mask_check_and_clear(&evt.sdmmc_status, SDMMC_DATA_ERR_MASK)) {
                    process_data_status(orig_evt.sdmmc_status, cmd);
                    sdmmc_host_dma_stop();
                }
                if (mask_check_and_clear(&evt.dma_status, SDMMC_DMA_DONE_MASK)) {
                    s_cur_transfer.desc_remaining--;
                    if (s_cur_transfer.size_remaining) {
                        int desc_to_fill = get_free_descriptors_count();
                        fill_dma_descriptors(desc_to_fill);
                        sdmmc_host_dma_resume();
                    }
                    if (s_cur_transfer.desc_remaining == 0) {
                        next_state = SDMMC_BUSY;
                    }
                }
                if (orig_evt.sdmmc_status & (SDMMC_INTMASK_SBE | SDMMC_INTMASK_DATA_OVER)) {
                    // On start bit error, the DATA_DONE interrupt will not be generated
                    next_state = SDMMC_IDLE;
                    break;
                }
                break;

            case SDMMC_BUSY:
                if (!mask_check_and_clear(&evt.sdmmc_status, SDMMC_INTMASK_DATA_OVER)) {
                    break;
                }
                process_data_status(orig_evt.sdmmc_status, cmd);
                next_state = SDMMC_IDLE;
                break;
        }
        ESP_LOGV(TAG, "%s state=%s next_state=%s", __func__,
                s_state_names[state], s_state_names[next_state]);
    }
    *pstate = state;
    *unhandled_events = evt;
    return ESP_OK;
}

static bool wait_for_busy_cleared(int timeout_ms)
{
    if (timeout_ms == 0) {
        return !sdmmc_host_card_busy();
    }

    /* Ideally this would be done without polling; however, the peripheral can
     * only generate the Busy Clear Interrupt for data write commands, while
     * waiting for busy clear is mostly needed for other commands such as
     * MMC_SWITCH.
     */
    int timeout_ticks = (timeout_ms + portTICK_PERIOD_MS - 1) / portTICK_PERIOD_MS;
    while (timeout_ticks-- > 0) {
        if (!sdmmc_host_card_busy()) {
            return true;
        }
        vTaskDelay(1);
    }
    return false;
}