1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4 *
5 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6 *
7 * Thanks to the following companies for their support:
8 *
9 * - JMicron (hardware and technical support)
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/delay.h>
14 #include <linux/dmaengine.h>
15 #include <linux/ktime.h>
16 #include <linux/highmem.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/slab.h>
21 #include <linux/scatterlist.h>
22 #include <linux/sizes.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
26
27 #include <linux/leds.h>
28
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
34
35 #include "sdhci.h"
36
37 #define DRIVER_NAME "sdhci"
38
39 #define DBG(f, x...) \
40 pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
41
42 #define SDHCI_DUMP(f, x...) \
43 pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44
45 #define MAX_TUNING_LOOP 40
46
47 static unsigned int debug_quirks = 0;
48 static unsigned int debug_quirks2;
49
50 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
51
52 static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
53
void sdhci_dumpregs(struct sdhci_host *host)
55 {
56 SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
57
58 SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
59 sdhci_readl(host, SDHCI_DMA_ADDRESS),
60 sdhci_readw(host, SDHCI_HOST_VERSION));
61 SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
62 sdhci_readw(host, SDHCI_BLOCK_SIZE),
63 sdhci_readw(host, SDHCI_BLOCK_COUNT));
64 SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
65 sdhci_readl(host, SDHCI_ARGUMENT),
66 sdhci_readw(host, SDHCI_TRANSFER_MODE));
67 SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
68 sdhci_readl(host, SDHCI_PRESENT_STATE),
69 sdhci_readb(host, SDHCI_HOST_CONTROL));
70 SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
71 sdhci_readb(host, SDHCI_POWER_CONTROL),
72 sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
73 SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
74 sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
75 sdhci_readw(host, SDHCI_CLOCK_CONTROL));
76 SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
77 sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
78 sdhci_readl(host, SDHCI_INT_STATUS));
79 SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
80 sdhci_readl(host, SDHCI_INT_ENABLE),
81 sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
82 SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
83 sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
84 sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
85 SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
86 sdhci_readl(host, SDHCI_CAPABILITIES),
87 sdhci_readl(host, SDHCI_CAPABILITIES_1));
88 SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
89 sdhci_readw(host, SDHCI_COMMAND),
90 sdhci_readl(host, SDHCI_MAX_CURRENT));
91 SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
92 sdhci_readl(host, SDHCI_RESPONSE),
93 sdhci_readl(host, SDHCI_RESPONSE + 4));
94 SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
95 sdhci_readl(host, SDHCI_RESPONSE + 8),
96 sdhci_readl(host, SDHCI_RESPONSE + 12));
97 SDHCI_DUMP("Host ctl2: 0x%08x\n",
98 sdhci_readw(host, SDHCI_HOST_CONTROL2));
99
100 if (host->flags & SDHCI_USE_ADMA) {
101 if (host->flags & SDHCI_USE_64_BIT_DMA) {
102 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
103 sdhci_readl(host, SDHCI_ADMA_ERROR),
104 sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
105 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
106 } else {
107 SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
108 sdhci_readl(host, SDHCI_ADMA_ERROR),
109 sdhci_readl(host, SDHCI_ADMA_ADDRESS));
110 }
111 }
112
113 if (host->ops->dump_vendor_regs)
114 host->ops->dump_vendor_regs(host);
115
116 SDHCI_DUMP("============================================\n");
117 }
118 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119
120 /*****************************************************************************\
121 * *
122 * Low level functions *
123 * *
124 \*****************************************************************************/
125
static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127 {
128 u16 ctrl2;
129
130 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131 if (ctrl2 & SDHCI_CTRL_V4_MODE)
132 return;
133
134 ctrl2 |= SDHCI_CTRL_V4_MODE;
135 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136 }
137
138 /*
* This can be called before sdhci_add_host() by a vendor's host controller
140 * driver to enable v4 mode if supported.
141 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
143 {
144 host->v4_mode = true;
145 sdhci_do_enable_v4_mode(host);
146 }
147 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
148
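/*
 * Commands with a busy (R1b) response hold DAT0 busy until the card is
 * ready, so they are treated like data commands for inhibit checks and
 * for choosing the data timer.
 */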
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
150 {
151 return cmd->data || cmd->flags & MMC_RSP_BUSY;
152 }
153
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
155 {
156 u32 present;
157
158 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
159 !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
160 return;
161
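/*
 * Arm the interrupt for the opposite of the current state: if a card is
 * present, watch for removal; otherwise watch for insertion.
 */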
162 if (enable) {
163 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
164 SDHCI_CARD_PRESENT;
165
166 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
167 SDHCI_INT_CARD_INSERT;
168 } else {
169 host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
170 }
171
172 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
173 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
174 }
175
static void sdhci_enable_card_detection(struct sdhci_host *host)
177 {
178 sdhci_set_card_detection(host, true);
179 }
180
static void sdhci_disable_card_detection(struct sdhci_host *host)
182 {
183 sdhci_set_card_detection(host, false);
184 }
185
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
187 {
188 if (host->bus_on)
189 return;
190 host->bus_on = true;
191 pm_runtime_get_noresume(mmc_dev(host->mmc));
192 }
193
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
195 {
196 if (!host->bus_on)
197 return;
198 host->bus_on = false;
199 pm_runtime_put_noidle(mmc_dev(host->mmc));
200 }
201
void sdhci_reset(struct sdhci_host *host, u8 mask)
203 {
204 ktime_t timeout;
205
206 sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
207
208 if (mask & SDHCI_RESET_ALL) {
209 host->clock = 0;
210 /* Reset-all turns off SD Bus Power */
211 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
212 sdhci_runtime_pm_bus_off(host);
213 }
214
215 /* Wait max 100 ms */
216 timeout = ktime_add_ms(ktime_get(), 100);
217
218 /* hw clears the bit when it's done */
219 while (1) {
220 bool timedout = ktime_after(ktime_get(), timeout);
221
222 if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223 break;
224 if (timedout) {
225 pr_err("%s: Reset 0x%x never completed.\n",
226 mmc_hostname(host->mmc), (int)mask);
227 sdhci_dumpregs(host);
228 return;
229 }
230 udelay(10);
231 }
232 }
233 EXPORT_SYMBOL_GPL(sdhci_reset);
234
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
236 {
237 if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
238 struct mmc_host *mmc = host->mmc;
239
240 if (!mmc->ops->get_cd(mmc))
241 return;
242 }
243
244 host->ops->reset(host, mask);
245
246 if (mask & SDHCI_RESET_ALL) {
247 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
248 if (host->ops->enable_dma)
249 host->ops->enable_dma(host);
250 }
251
/* Resetting the controller clears many settings; preset value enable is one of them */
253 host->preset_enabled = false;
254 }
255 }
256
static void sdhci_set_default_irqs(struct sdhci_host *host)
258 {
259 host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
260 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
261 SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
262 SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
263 SDHCI_INT_RESPONSE;
264
265 if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
266 host->tuning_mode == SDHCI_TUNING_MODE_3)
267 host->ier |= SDHCI_INT_RETUNE;
268
269 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
270 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
271 }
272
static void sdhci_config_dma(struct sdhci_host *host)
274 {
275 u8 ctrl;
276 u16 ctrl2;
277
278 if (host->version < SDHCI_SPEC_200)
279 return;
280
281 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
282
283 /*
284 * Always adjust the DMA selection as some controllers
285 * (e.g. JMicron) can't do PIO properly when the selection
286 * is ADMA.
287 */
288 ctrl &= ~SDHCI_CTRL_DMA_MASK;
289 if (!(host->flags & SDHCI_REQ_USE_DMA))
290 goto out;
291
292 /* Note if DMA Select is zero then SDMA is selected */
293 if (host->flags & SDHCI_USE_ADMA)
294 ctrl |= SDHCI_CTRL_ADMA32;
295
296 if (host->flags & SDHCI_USE_64_BIT_DMA) {
297 /*
298 * If v4 mode, all supported DMA can be 64-bit addressing if
299 * controller supports 64-bit system address, otherwise only
300 * ADMA can support 64-bit addressing.
301 */
302 if (host->v4_mode) {
303 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
304 ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
305 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
306 } else if (host->flags & SDHCI_USE_ADMA) {
307 /*
308 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
309 * set SDHCI_CTRL_ADMA64.
310 */
311 ctrl |= SDHCI_CTRL_ADMA64;
312 }
313 }
314
315 out:
316 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
317 }
318
static void sdhci_init(struct sdhci_host *host, int soft)
320 {
321 struct mmc_host *mmc = host->mmc;
322 unsigned long flags;
323
324 if (soft)
325 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
326 else
327 sdhci_do_reset(host, SDHCI_RESET_ALL);
328
329 if (host->v4_mode)
330 sdhci_do_enable_v4_mode(host);
331
332 spin_lock_irqsave(&host->lock, flags);
333 sdhci_set_default_irqs(host);
334 spin_unlock_irqrestore(&host->lock, flags);
335
336 host->cqe_on = false;
337
338 if (soft) {
339 /* force clock reconfiguration */
340 host->clock = 0;
341 mmc->ops->set_ios(mmc, &mmc->ios);
342 }
343 }
344
static void sdhci_reinit(struct sdhci_host *host)
346 {
347 u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
348
349 sdhci_init(host, 0);
350 sdhci_enable_card_detection(host);
351
352 /*
353 * A change to the card detect bits indicates a change in present state,
* see sdhci_set_card_detection(). A card detect interrupt might have
355 * been missed while the host controller was being reset, so trigger a
356 * rescan to check.
357 */
358 if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
359 mmc_detect_change(host->mmc, msecs_to_jiffies(200));
360 }
361
static void __sdhci_led_activate(struct sdhci_host *host)
363 {
364 u8 ctrl;
365
366 if (host->quirks & SDHCI_QUIRK_NO_LED)
367 return;
368
369 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
370 ctrl |= SDHCI_CTRL_LED;
371 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
372 }
373
static void __sdhci_led_deactivate(struct sdhci_host *host)
375 {
376 u8 ctrl;
377
378 if (host->quirks & SDHCI_QUIRK_NO_LED)
379 return;
380
381 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
382 ctrl &= ~SDHCI_CTRL_LED;
383 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
384 }
385
386 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
enum led_brightness brightness)
389 {
390 struct sdhci_host *host = container_of(led, struct sdhci_host, led);
391 unsigned long flags;
392
393 spin_lock_irqsave(&host->lock, flags);
394
395 if (host->runtime_suspended)
396 goto out;
397
398 if (brightness == LED_OFF)
399 __sdhci_led_deactivate(host);
400 else
401 __sdhci_led_activate(host);
402 out:
403 spin_unlock_irqrestore(&host->lock, flags);
404 }
405
static int sdhci_led_register(struct sdhci_host *host)
407 {
408 struct mmc_host *mmc = host->mmc;
409
410 if (host->quirks & SDHCI_QUIRK_NO_LED)
411 return 0;
412
413 snprintf(host->led_name, sizeof(host->led_name),
414 "%s::", mmc_hostname(mmc));
415
416 host->led.name = host->led_name;
417 host->led.brightness = LED_OFF;
418 host->led.default_trigger = mmc_hostname(mmc);
419 host->led.brightness_set = sdhci_led_control;
420
421 return led_classdev_register(mmc_dev(mmc), &host->led);
422 }
423
static void sdhci_led_unregister(struct sdhci_host *host)
425 {
426 if (host->quirks & SDHCI_QUIRK_NO_LED)
427 return;
428
429 led_classdev_unregister(&host->led);
430 }
431
static inline void sdhci_led_activate(struct sdhci_host *host)
433 {
434 }
435
static inline void sdhci_led_deactivate(struct sdhci_host *host)
437 {
438 }
439
440 #else
441
static inline int sdhci_led_register(struct sdhci_host *host)
443 {
444 return 0;
445 }
446
static inline void sdhci_led_unregister(struct sdhci_host *host)
448 {
449 }
450
static inline void sdhci_led_activate(struct sdhci_host *host)
452 {
453 __sdhci_led_activate(host);
454 }
455
static inline void sdhci_led_deactivate(struct sdhci_host *host)
457 {
458 __sdhci_led_deactivate(host);
459 }
460
461 #endif
462
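/*
 * Requests that occupy the data lines are tracked with the data timer;
 * command-only requests use the plain request timer.
 */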
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
unsigned long timeout)
465 {
466 if (sdhci_data_line_cmd(mrq->cmd))
467 mod_timer(&host->data_timer, timeout);
468 else
469 mod_timer(&host->timer, timeout);
470 }
471
static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
473 {
474 if (sdhci_data_line_cmd(mrq->cmd))
475 del_timer(&host->data_timer);
476 else
477 del_timer(&host->timer);
478 }
479
static inline bool sdhci_has_requests(struct sdhci_host *host)
481 {
482 return host->cmd || host->data_cmd;
483 }
484
485 /*****************************************************************************\
486 * *
487 * Core functions *
488 * *
489 \*****************************************************************************/
490
static void sdhci_read_block_pio(struct sdhci_host *host)
492 {
493 unsigned long flags;
494 size_t blksize, len, chunk;
495 u32 scratch;
496 u8 *buf;
497
498 DBG("PIO reading\n");
499
500 blksize = host->data->blksz;
501 chunk = 0;
502
503 local_irq_save(flags);
504
505 while (blksize) {
506 BUG_ON(!sg_miter_next(&host->sg_miter));
507
508 len = min(host->sg_miter.length, blksize);
509
510 blksize -= len;
511 host->sg_miter.consumed = len;
512
513 buf = host->sg_miter.addr;
514
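/*
 * The buffer data port is a 32-bit register, so read one word at a time
 * and unpack it byte by byte into the scatterlist buffer.
 */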
515 while (len) {
516 if (chunk == 0) {
517 scratch = sdhci_readl(host, SDHCI_BUFFER);
518 chunk = 4;
519 }
520
521 *buf = scratch & 0xFF;
522
523 buf++;
524 scratch >>= 8;
525 chunk--;
526 len--;
527 }
528 }
529
530 sg_miter_stop(&host->sg_miter);
531
532 local_irq_restore(flags);
533 }
534
static void sdhci_write_block_pio(struct sdhci_host *host)
536 {
537 unsigned long flags;
538 size_t blksize, len, chunk;
539 u32 scratch;
540 u8 *buf;
541
542 DBG("PIO writing\n");
543
544 blksize = host->data->blksz;
545 chunk = 0;
546 scratch = 0;
547
548 local_irq_save(flags);
549
550 while (blksize) {
551 BUG_ON(!sg_miter_next(&host->sg_miter));
552
553 len = min(host->sg_miter.length, blksize);
554
555 blksize -= len;
556 host->sg_miter.consumed = len;
557
558 buf = host->sg_miter.addr;
559
560 while (len) {
561 scratch |= (u32)*buf << (chunk * 8);
562
563 buf++;
564 chunk++;
565 len--;
566
567 if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
568 sdhci_writel(host, scratch, SDHCI_BUFFER);
569 chunk = 0;
570 scratch = 0;
571 }
572 }
573 }
574
575 sg_miter_stop(&host->sg_miter);
576
577 local_irq_restore(flags);
578 }
579
static void sdhci_transfer_pio(struct sdhci_host *host)
581 {
582 u32 mask;
583
584 if (host->blocks == 0)
585 return;
586
587 if (host->data->flags & MMC_DATA_READ)
588 mask = SDHCI_DATA_AVAILABLE;
589 else
590 mask = SDHCI_SPACE_AVAILABLE;
591
592 /*
593 * Some controllers (JMicron JMB38x) mess up the buffer bits
594 * for transfers < 4 bytes. As long as it is just one block,
595 * we can ignore the bits.
596 */
597 if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
598 (host->data->blocks == 1))
599 mask = ~0;
600
601 while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
602 if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
603 udelay(100);
604
605 if (host->data->flags & MMC_DATA_READ)
606 sdhci_read_block_pio(host);
607 else
608 sdhci_write_block_pio(host);
609
610 host->blocks--;
611 if (host->blocks == 0)
612 break;
613 }
614
615 DBG("PIO transfer complete.\n");
616 }
617
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
struct mmc_data *data, int cookie)
620 {
621 int sg_count;
622
623 /*
624 * If the data buffers are already mapped, return the previous
625 * dma_map_sg() result.
626 */
627 if (data->host_cookie == COOKIE_PRE_MAPPED)
628 return data->sg_count;
629
630 /* Bounce write requests to the bounce buffer */
631 if (host->bounce_buffer) {
632 unsigned int length = data->blksz * data->blocks;
633
634 if (length > host->bounce_buffer_size) {
635 pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
636 mmc_hostname(host->mmc), length,
637 host->bounce_buffer_size);
638 return -EIO;
639 }
640 if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
641 /* Copy the data to the bounce buffer */
642 if (host->ops->copy_to_bounce_buffer) {
643 host->ops->copy_to_bounce_buffer(host,
644 data, length);
645 } else {
646 sg_copy_to_buffer(data->sg, data->sg_len,
647 host->bounce_buffer, length);
648 }
649 }
650 /* Switch ownership to the DMA */
651 dma_sync_single_for_device(mmc_dev(host->mmc),
652 host->bounce_addr,
653 host->bounce_buffer_size,
654 mmc_get_dma_dir(data));
655 /* Just a dummy value */
656 sg_count = 1;
657 } else {
658 /* Just access the data directly from memory */
659 sg_count = dma_map_sg(mmc_dev(host->mmc),
660 data->sg, data->sg_len,
661 mmc_get_dma_dir(data));
662 }
663
664 if (sg_count == 0)
665 return -ENOSPC;
666
667 data->sg_count = sg_count;
668 data->host_cookie = cookie;
669
670 return sg_count;
671 }
672
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
674 {
675 local_irq_save(*flags);
676 return kmap_atomic(sg_page(sg)) + sg->offset;
677 }
678
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
680 {
681 kunmap_atomic(buffer);
682 local_irq_restore(*flags);
683 }
684
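/*
 * Write one ADMA2 descriptor: a 16-bit attribute/command word, a 16-bit
 * length and a 32-bit (64-bit if enabled) data address, then advance
 * *desc by the host's descriptor size.
 */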
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
dma_addr_t addr, int len, unsigned int cmd)
687 {
688 struct sdhci_adma2_64_desc *dma_desc = *desc;
689
690 /* 32-bit and 64-bit descriptors have these members in same position */
691 dma_desc->cmd = cpu_to_le16(cmd);
692 dma_desc->len = cpu_to_le16(len);
693 dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
694
695 if (host->flags & SDHCI_USE_64_BIT_DMA)
696 dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
697
698 *desc += host->desc_sz;
699 }
700 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
701
static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
void **desc, dma_addr_t addr,
int len, unsigned int cmd)
705 {
706 if (host->ops->adma_write_desc)
707 host->ops->adma_write_desc(host, desc, addr, len, cmd);
708 else
709 sdhci_adma_write_desc(host, desc, addr, len, cmd);
710 }
711
static void sdhci_adma_mark_end(void *desc)
713 {
714 struct sdhci_adma2_64_desc *dma_desc = desc;
715
716 /* 32-bit and 64-bit descriptors have 'cmd' in same position */
717 dma_desc->cmd |= cpu_to_le16(ADMA2_END);
718 }
719
static void sdhci_adma_table_pre(struct sdhci_host *host,
struct mmc_data *data, int sg_count)
722 {
723 struct scatterlist *sg;
724 unsigned long flags;
725 dma_addr_t addr, align_addr;
726 void *desc, *align;
727 char *buffer;
728 int len, offset, i;
729
730 /*
731 * The spec does not specify endianness of descriptor table.
732 * We currently guess that it is LE.
733 */
734
735 host->sg_count = sg_count;
736
737 desc = host->adma_table;
738 align = host->align_buffer;
739
740 align_addr = host->align_addr;
741
742 for_each_sg(data->sg, sg, host->sg_count, i) {
743 addr = sg_dma_address(sg);
744 len = sg_dma_len(sg);
745
746 /*
747 * The SDHCI specification states that ADMA addresses must
748 * be 32-bit aligned. If they aren't, then we use a bounce
749 * buffer for the (up to three) bytes that screw up the
750 * alignment.
751 */
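/*
 * For example, with 4-byte alignment a segment starting at ...0x02 gives
 * offset = 2, so those two bytes go through the align buffer and the rest
 * of the segment is described directly.
 */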
752 offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
753 SDHCI_ADMA2_MASK;
754 if (offset) {
755 if (data->flags & MMC_DATA_WRITE) {
756 buffer = sdhci_kmap_atomic(sg, &flags);
757 memcpy(align, buffer, offset);
758 sdhci_kunmap_atomic(buffer, &flags);
759 }
760
761 /* tran, valid */
762 __sdhci_adma_write_desc(host, &desc, align_addr,
763 offset, ADMA2_TRAN_VALID);
764
765 BUG_ON(offset > 65536);
766
767 align += SDHCI_ADMA2_ALIGN;
768 align_addr += SDHCI_ADMA2_ALIGN;
769
770 addr += offset;
771 len -= offset;
772 }
773
774 BUG_ON(len > 65536);
775
776 /* tran, valid */
777 if (len)
778 __sdhci_adma_write_desc(host, &desc, addr, len,
779 ADMA2_TRAN_VALID);
780
781 /*
782 * If this triggers then we have a calculation bug
783 * somewhere. :/
784 */
785 WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
786 }
787
788 if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
789 /* Mark the last descriptor as the terminating descriptor */
790 if (desc != host->adma_table) {
791 desc -= host->desc_sz;
792 sdhci_adma_mark_end(desc);
793 }
794 } else {
795 /* Add a terminating entry - nop, end, valid */
796 __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
797 }
798 }
799
static void sdhci_adma_table_post(struct sdhci_host *host,
struct mmc_data *data)
802 {
803 struct scatterlist *sg;
804 int i, size;
805 void *align;
806 char *buffer;
807 unsigned long flags;
808
809 if (data->flags & MMC_DATA_READ) {
810 bool has_unaligned = false;
811
812 /* Do a quick scan of the SG list for any unaligned mappings */
813 for_each_sg(data->sg, sg, host->sg_count, i)
814 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
815 has_unaligned = true;
816 break;
817 }
818
819 if (has_unaligned) {
820 dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
821 data->sg_len, DMA_FROM_DEVICE);
822
823 align = host->align_buffer;
824
825 for_each_sg(data->sg, sg, host->sg_count, i) {
826 if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
827 size = SDHCI_ADMA2_ALIGN -
828 (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
829
830 buffer = sdhci_kmap_atomic(sg, &flags);
831 memcpy(buffer, align, size);
832 sdhci_kunmap_atomic(buffer, &flags);
833
834 align += SDHCI_ADMA2_ALIGN;
835 }
836 }
837 }
838 }
839 }
840
static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
842 {
843 sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
844 if (host->flags & SDHCI_USE_64_BIT_DMA)
845 sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
846 }
847
static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
849 {
850 if (host->bounce_buffer)
851 return host->bounce_addr;
852 else
853 return sg_dma_address(host->data->sg);
854 }
855
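/*
 * In v4 mode the SDMA address is programmed through the ADMA system
 * address register rather than the legacy 32-bit DMA address register.
 */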
static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
857 {
858 if (host->v4_mode)
859 sdhci_set_adma_addr(host, addr);
860 else
861 sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
862 }
863
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
struct mmc_command *cmd,
struct mmc_data *data)
867 {
868 unsigned int target_timeout;
869
870 /* timeout in us */
871 if (!data) {
872 target_timeout = cmd->busy_timeout * 1000;
873 } else {
874 target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
875 if (host->clock && data->timeout_clks) {
876 unsigned long long val;
877
878 /*
879 * data->timeout_clks is in units of clock cycles.
880 * host->clock is in Hz. target_timeout is in us.
881 * Hence, us = 1000000 * cycles / Hz. Round up.
882 */
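/* For example, 1000 timeout_clks at 25 MHz add 40 us to the target. */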
883 val = 1000000ULL * data->timeout_clks;
884 if (do_div(val, host->clock))
885 target_timeout++;
886 target_timeout += val;
887 }
888 }
889
890 return target_timeout;
891 }
892
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
struct mmc_command *cmd)
895 {
896 struct mmc_data *data = cmd->data;
897 struct mmc_host *mmc = host->mmc;
898 struct mmc_ios *ios = &mmc->ios;
899 unsigned char bus_width = 1 << ios->bus_width;
900 unsigned int blksz;
901 unsigned int freq;
902 u64 target_timeout;
903 u64 transfer_time;
904
905 target_timeout = sdhci_target_timeout(host, cmd, data);
906 target_timeout *= NSEC_PER_USEC;
907
908 if (data) {
909 blksz = data->blksz;
910 freq = mmc->actual_clock ? : host->clock;
911 transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
912 do_div(transfer_time, freq);
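/*
 * For example, a 512-byte block on a 4-bit bus at 50 MHz needs
 * 512 * 8 / 4 = 1024 clocks, i.e. roughly 20 us per block before the
 * 2x safety margin below.
 */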
913 /* multiply by '2' to account for any unknowns */
914 transfer_time = transfer_time * 2;
915 /* calculate timeout for the entire data */
916 host->data_timeout = data->blocks * target_timeout +
917 transfer_time;
918 } else {
919 host->data_timeout = target_timeout;
920 }
921
922 if (host->data_timeout)
923 host->data_timeout += MMC_CMD_TRANSFER_TIME;
924 }
925
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
bool *too_big)
928 {
929 u8 count;
930 struct mmc_data *data;
931 unsigned target_timeout, current_timeout;
932
933 *too_big = true;
934
935 /*
936 * If the host controller provides us with an incorrect timeout
937 * value, just skip the check and use the maximum. The hardware may take
938 * longer to time out, but that's much better than having a too-short
939 * timeout value.
940 */
941 if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
942 return host->max_timeout_count;
943
/* Unspecified command, assume max */
945 if (cmd == NULL)
946 return host->max_timeout_count;
947
948 data = cmd->data;
949 /* Unspecified timeout, assume max */
950 if (!data && !cmd->busy_timeout)
951 return host->max_timeout_count;
952
953 /* timeout in us */
954 target_timeout = sdhci_target_timeout(host, cmd, data);
955
956 /*
957 * Figure out needed cycles.
958 * We do this in steps in order to fit inside a 32 bit int.
959 * The first step is the minimum timeout, which will have a
960 * minimum resolution of 6 bits:
961 * (1) 2^13*1000 > 2^22,
962 * (2) host->timeout_clk < 2^16
963 * =>
964 * (1) / (2) > 2^6
965 */
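/*
 * Example, assuming timeout_clk is in kHz: at 50 MHz the minimum timeout
 * is 2^13 cycles = 8192 * 1000 / 50000 ~= 164 us, and each increment of
 * 'count' doubles it.
 */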
966 count = 0;
967 current_timeout = (1 << 13) * 1000 / host->timeout_clk;
968 while (current_timeout < target_timeout) {
969 count++;
970 current_timeout <<= 1;
971 if (count > host->max_timeout_count)
972 break;
973 }
974
975 if (count > host->max_timeout_count) {
976 if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
977 DBG("Too large timeout 0x%x requested for CMD%d!\n",
978 count, cmd->opcode);
979 count = host->max_timeout_count;
980 } else {
981 *too_big = false;
982 }
983
984 return count;
985 }
986
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
988 {
989 u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
990 u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
991
992 if (host->flags & SDHCI_REQ_USE_DMA)
993 host->ier = (host->ier & ~pio_irqs) | dma_irqs;
994 else
995 host->ier = (host->ier & ~dma_irqs) | pio_irqs;
996
997 if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
998 host->ier |= SDHCI_INT_AUTO_CMD_ERR;
999 else
1000 host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1001
1002 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1003 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1004 }
1005
void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1007 {
1008 if (enable)
1009 host->ier |= SDHCI_INT_DATA_TIMEOUT;
1010 else
1011 host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1012 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1013 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1014 }
1015 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1016
void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1018 {
1019 bool too_big = false;
1020 u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1021
1022 if (too_big &&
1023 host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1024 sdhci_calc_sw_timeout(host, cmd);
1025 sdhci_set_data_timeout_irq(host, false);
1026 } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1027 sdhci_set_data_timeout_irq(host, true);
1028 }
1029
1030 sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1031 }
1032 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1033
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1035 {
1036 if (host->ops->set_timeout)
1037 host->ops->set_timeout(host, cmd);
1038 else
1039 __sdhci_set_timeout(host, cmd);
1040 }
1041
static void sdhci_initialize_data(struct sdhci_host *host,
struct mmc_data *data)
1044 {
1045 WARN_ON(host->data);
1046
1047 /* Sanity checks */
1048 BUG_ON(data->blksz * data->blocks > 524288);
1049 BUG_ON(data->blksz > host->mmc->max_blk_size);
1050 BUG_ON(data->blocks > 65535);
1051
1052 host->data = data;
1053 host->data_early = 0;
1054 host->data->bytes_xfered = 0;
1055 }
1056
static inline void sdhci_set_block_info(struct sdhci_host *host,
struct mmc_data *data)
1059 {
1060 /* Set the DMA boundary value and block size */
1061 sdhci_writew(host,
1062 SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1063 SDHCI_BLOCK_SIZE);
1064 /*
1065 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
1066 * can be supported, in that case 16-bit block count register must be 0.
1067 */
1068 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1069 (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1070 if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1071 sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1072 sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1073 } else {
1074 sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1075 }
1076 }
1077
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1079 {
1080 struct mmc_data *data = cmd->data;
1081
1082 sdhci_initialize_data(host, data);
1083
1084 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1085 struct scatterlist *sg;
1086 unsigned int length_mask, offset_mask;
1087 int i;
1088
1089 host->flags |= SDHCI_REQ_USE_DMA;
1090
1091 /*
1092 * FIXME: This doesn't account for merging when mapping the
1093 * scatterlist.
1094 *
1095 * The assumption here being that alignment and lengths are
1096 * the same after DMA mapping to device address space.
1097 */
1098 length_mask = 0;
1099 offset_mask = 0;
1100 if (host->flags & SDHCI_USE_ADMA) {
1101 if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1102 length_mask = 3;
1103 /*
1104 * As we use up to 3 byte chunks to work
1105 * around alignment problems, we need to
1106 * check the offset as well.
1107 */
1108 offset_mask = 3;
1109 }
1110 } else {
1111 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1112 length_mask = 3;
1113 if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1114 offset_mask = 3;
1115 }
1116
1117 if (unlikely(length_mask | offset_mask)) {
1118 for_each_sg(data->sg, sg, data->sg_len, i) {
1119 if (sg->length & length_mask) {
1120 DBG("Reverting to PIO because of transfer size (%d)\n",
1121 sg->length);
1122 host->flags &= ~SDHCI_REQ_USE_DMA;
1123 break;
1124 }
1125 if (sg->offset & offset_mask) {
1126 DBG("Reverting to PIO because of bad alignment\n");
1127 host->flags &= ~SDHCI_REQ_USE_DMA;
1128 break;
1129 }
1130 }
1131 }
1132 }
1133
1134 if (host->flags & SDHCI_REQ_USE_DMA) {
1135 int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1136
1137 if (sg_cnt <= 0) {
1138 /*
1139 * This only happens when someone fed
1140 * us an invalid request.
1141 */
1142 WARN_ON(1);
1143 host->flags &= ~SDHCI_REQ_USE_DMA;
1144 } else if (host->flags & SDHCI_USE_ADMA) {
1145 sdhci_adma_table_pre(host, data, sg_cnt);
1146 sdhci_set_adma_addr(host, host->adma_addr);
1147 } else {
1148 WARN_ON(sg_cnt != 1);
1149 sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1150 }
1151 }
1152
1153 sdhci_config_dma(host);
1154
1155 if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1156 int flags;
1157
1158 flags = SG_MITER_ATOMIC;
1159 if (host->data->flags & MMC_DATA_READ)
1160 flags |= SG_MITER_TO_SG;
1161 else
1162 flags |= SG_MITER_FROM_SG;
1163 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1164 host->blocks = data->blocks;
1165 }
1166
1167 sdhci_set_transfer_irqs(host);
1168
1169 sdhci_set_block_info(host, data);
1170 }
1171
1172 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1173
static int sdhci_external_dma_init(struct sdhci_host *host)
1175 {
1176 int ret = 0;
1177 struct mmc_host *mmc = host->mmc;
1178
1179 host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
1180 if (IS_ERR(host->tx_chan)) {
1181 ret = PTR_ERR(host->tx_chan);
1182 if (ret != -EPROBE_DEFER)
1183 pr_warn("Failed to request TX DMA channel.\n");
1184 host->tx_chan = NULL;
1185 return ret;
1186 }
1187
1188 host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
1189 if (IS_ERR(host->rx_chan)) {
1190 if (host->tx_chan) {
1191 dma_release_channel(host->tx_chan);
1192 host->tx_chan = NULL;
1193 }
1194
1195 ret = PTR_ERR(host->rx_chan);
1196 if (ret != -EPROBE_DEFER)
1197 pr_warn("Failed to request RX DMA channel.\n");
1198 host->rx_chan = NULL;
1199 }
1200
1201 return ret;
1202 }
1203
static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
struct mmc_data *data)
1206 {
1207 return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1208 }
1209
static int sdhci_external_dma_setup(struct sdhci_host *host,
struct mmc_command *cmd)
1212 {
1213 int ret, i;
1214 enum dma_transfer_direction dir;
1215 struct dma_async_tx_descriptor *desc;
1216 struct mmc_data *data = cmd->data;
1217 struct dma_chan *chan;
1218 struct dma_slave_config cfg;
1219 dma_cookie_t cookie;
1220 int sg_cnt;
1221
1222 if (!host->mapbase)
1223 return -EINVAL;
1224
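/*
 * Both directions target the 32-bit SDHCI buffer data port; the burst
 * size is one block expressed in 32-bit words.
 */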
1225 memset(&cfg, 0, sizeof(cfg));
1226 cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1227 cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1228 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1229 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1230 cfg.src_maxburst = data->blksz / 4;
1231 cfg.dst_maxburst = data->blksz / 4;
1232
1233 /* Sanity check: all the SG entries must be aligned by block size. */
1234 for (i = 0; i < data->sg_len; i++) {
1235 if ((data->sg + i)->length % data->blksz)
1236 return -EINVAL;
1237 }
1238
1239 chan = sdhci_external_dma_channel(host, data);
1240
1241 ret = dmaengine_slave_config(chan, &cfg);
1242 if (ret)
1243 return ret;
1244
1245 sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1246 if (sg_cnt <= 0)
1247 return -EINVAL;
1248
1249 dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1250 desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1251 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1252 if (!desc)
1253 return -EINVAL;
1254
1255 desc->callback = NULL;
1256 desc->callback_param = NULL;
1257
1258 cookie = dmaengine_submit(desc);
1259 if (dma_submit_error(cookie))
1260 ret = cookie;
1261
1262 return ret;
1263 }
1264
static void sdhci_external_dma_release(struct sdhci_host *host)
1266 {
1267 if (host->tx_chan) {
1268 dma_release_channel(host->tx_chan);
1269 host->tx_chan = NULL;
1270 }
1271
1272 if (host->rx_chan) {
1273 dma_release_channel(host->rx_chan);
1274 host->rx_chan = NULL;
1275 }
1276
1277 sdhci_switch_external_dma(host, false);
1278 }
1279
static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
struct mmc_command *cmd)
1282 {
1283 struct mmc_data *data = cmd->data;
1284
1285 sdhci_initialize_data(host, data);
1286
1287 host->flags |= SDHCI_REQ_USE_DMA;
1288 sdhci_set_transfer_irqs(host);
1289
1290 sdhci_set_block_info(host, data);
1291 }
1292
static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
struct mmc_command *cmd)
1295 {
1296 if (!sdhci_external_dma_setup(host, cmd)) {
1297 __sdhci_external_dma_prepare_data(host, cmd);
1298 } else {
1299 sdhci_external_dma_release(host);
1300 pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1301 mmc_hostname(host->mmc));
1302 sdhci_prepare_data(host, cmd);
1303 }
1304 }
1305
static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
struct mmc_command *cmd)
1308 {
1309 struct dma_chan *chan;
1310
1311 if (!cmd->data)
1312 return;
1313
1314 chan = sdhci_external_dma_channel(host, cmd->data);
1315 if (chan)
1316 dma_async_issue_pending(chan);
1317 }
1318
1319 #else
1320
static inline int sdhci_external_dma_init(struct sdhci_host *host)
1322 {
1323 return -EOPNOTSUPP;
1324 }
1325
static inline void sdhci_external_dma_release(struct sdhci_host *host)
1327 {
1328 }
1329
static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
struct mmc_command *cmd)
1332 {
1333 /* This should never happen */
1334 WARN_ON_ONCE(1);
1335 }
1336
static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
struct mmc_command *cmd)
1339 {
1340 }
1341
static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
struct mmc_data *data)
1344 {
1345 return NULL;
1346 }
1347
1348 #endif
1349
void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1351 {
1352 host->use_external_dma = en;
1353 }
1354 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1355
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
struct mmc_request *mrq)
1358 {
1359 return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1360 !mrq->cap_cmd_during_tfr;
1361 }
1362
static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
struct mmc_request *mrq)
1365 {
1366 return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1367 }
1368
static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
struct mmc_request *mrq)
1371 {
1372 return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1373 }
1374
static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
struct mmc_command *cmd,
u16 *mode)
1378 {
1379 bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1380 (cmd->opcode != SD_IO_RW_EXTENDED);
1381 bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1382 u16 ctrl2;
1383
1384 /*
1385 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1386 * Select' is recommended rather than use of 'Auto CMD12
1387 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
* here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1389 */
1390 if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1391 (use_cmd12 || use_cmd23)) {
1392 *mode |= SDHCI_TRNS_AUTO_SEL;
1393
1394 ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1395 if (use_cmd23)
1396 ctrl2 |= SDHCI_CMD23_ENABLE;
1397 else
1398 ctrl2 &= ~SDHCI_CMD23_ENABLE;
1399 sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1400
1401 return;
1402 }
1403
1404 /*
1405 * If we are sending CMD23, CMD12 never gets sent
1406 * on successful completion (so no Auto-CMD12).
1407 */
1408 if (use_cmd12)
1409 *mode |= SDHCI_TRNS_AUTO_CMD12;
1410 else if (use_cmd23)
1411 *mode |= SDHCI_TRNS_AUTO_CMD23;
1412 }
1413
static void sdhci_set_transfer_mode(struct sdhci_host *host,
struct mmc_command *cmd)
1416 {
1417 u16 mode = 0;
1418 struct mmc_data *data = cmd->data;
1419
1420 if (data == NULL) {
1421 if (host->quirks2 &
1422 SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1423 /* must not clear SDHCI_TRANSFER_MODE when tuning */
1424 if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1425 sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1426 } else {
1427 /* clear Auto CMD settings for no data CMDs */
1428 mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1429 sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1430 SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1431 }
1432 return;
1433 }
1434
1435 WARN_ON(!host->data);
1436
1437 if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1438 mode = SDHCI_TRNS_BLK_CNT_EN;
1439
1440 if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1441 mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1442 sdhci_auto_cmd_select(host, cmd, &mode);
1443 if (sdhci_auto_cmd23(host, cmd->mrq))
1444 sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1445 }
1446
1447 if (data->flags & MMC_DATA_READ)
1448 mode |= SDHCI_TRNS_READ;
1449 if (host->flags & SDHCI_REQ_USE_DMA)
1450 mode |= SDHCI_TRNS_DMA;
1451
1452 sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1453 }
1454
static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1456 {
1457 return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1458 ((mrq->cmd && mrq->cmd->error) ||
1459 (mrq->sbc && mrq->sbc->error) ||
1460 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1461 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1462 }
1463
static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1465 {
1466 int i;
1467
1468 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1469 if (host->mrqs_done[i] == mrq) {
1470 WARN_ON(1);
1471 return;
1472 }
1473 }
1474
1475 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1476 if (!host->mrqs_done[i]) {
1477 host->mrqs_done[i] = mrq;
1478 break;
1479 }
1480 }
1481
1482 WARN_ON(i >= SDHCI_MAX_MRQS);
1483 }
1484
static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1486 {
1487 if (host->cmd && host->cmd->mrq == mrq)
1488 host->cmd = NULL;
1489
1490 if (host->data_cmd && host->data_cmd->mrq == mrq)
1491 host->data_cmd = NULL;
1492
1493 if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1494 host->deferred_cmd = NULL;
1495
1496 if (host->data && host->data->mrq == mrq)
1497 host->data = NULL;
1498
1499 if (sdhci_needs_reset(host, mrq))
1500 host->pending_reset = true;
1501
1502 sdhci_set_mrq_done(host, mrq);
1503
1504 sdhci_del_timer(host, mrq);
1505
1506 if (!sdhci_has_requests(host))
1507 sdhci_led_deactivate(host);
1508 }
1509
static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1511 {
1512 __sdhci_finish_mrq(host, mrq);
1513
1514 queue_work(host->complete_wq, &host->complete_work);
1515 }
1516
static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1518 {
1519 struct mmc_command *data_cmd = host->data_cmd;
1520 struct mmc_data *data = host->data;
1521
1522 host->data = NULL;
1523 host->data_cmd = NULL;
1524
1525 /*
1526 * The controller needs a reset of internal state machines upon error
1527 * conditions.
1528 */
1529 if (data->error) {
1530 if (!host->cmd || host->cmd == data_cmd)
1531 sdhci_do_reset(host, SDHCI_RESET_CMD);
1532 sdhci_do_reset(host, SDHCI_RESET_DATA);
1533 }
1534
1535 if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1536 (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1537 sdhci_adma_table_post(host, data);
1538
1539 /*
1540 * The specification states that the block count register must
1541 * be updated, but it does not specify at what point in the
1542 * data flow. That makes the register entirely useless to read
1543 * back so we have to assume that nothing made it to the card
1544 * in the event of an error.
1545 */
1546 if (data->error)
1547 data->bytes_xfered = 0;
1548 else
1549 data->bytes_xfered = data->blksz * data->blocks;
1550
1551 /*
1552 * Need to send CMD12 if -
1553 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1554 * b) error in multiblock transfer
1555 */
1556 if (data->stop &&
1557 ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1558 data->error)) {
1559 /*
1560 * 'cap_cmd_during_tfr' request must not use the command line
1561 * after mmc_command_done() has been called. It is upper layer's
1562 * responsibility to send the stop command if required.
1563 */
1564 if (data->mrq->cap_cmd_during_tfr) {
1565 __sdhci_finish_mrq(host, data->mrq);
1566 } else {
1567 /* Avoid triggering warning in sdhci_send_command() */
1568 host->cmd = NULL;
1569 if (!sdhci_send_command(host, data->stop)) {
1570 if (sw_data_timeout) {
1571 /*
1572 * This is anyway a sw data timeout, so
1573 * give up now.
1574 */
1575 data->stop->error = -EIO;
1576 __sdhci_finish_mrq(host, data->mrq);
1577 } else {
1578 WARN_ON(host->deferred_cmd);
1579 host->deferred_cmd = data->stop;
1580 }
1581 }
1582 }
1583 } else {
1584 __sdhci_finish_mrq(host, data->mrq);
1585 }
1586 }
1587
static void sdhci_finish_data(struct sdhci_host *host)
1589 {
1590 __sdhci_finish_data(host, false);
1591 }
1592
static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1594 {
1595 int flags;
1596 u32 mask;
1597 unsigned long timeout;
1598
1599 WARN_ON(host->cmd);
1600
1601 /* Initially, a command has no error */
1602 cmd->error = 0;
1603
1604 if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1605 cmd->opcode == MMC_STOP_TRANSMISSION)
1606 cmd->flags |= MMC_RSP_BUSY;
1607
1608 mask = SDHCI_CMD_INHIBIT;
1609 if (sdhci_data_line_cmd(cmd))
1610 mask |= SDHCI_DATA_INHIBIT;
1611
/* We shouldn't wait for data inhibit for stop commands, even
though they might use busy signaling */
1614 if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1615 mask &= ~SDHCI_DATA_INHIBIT;
1616
1617 if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1618 return false;
1619
1620 host->cmd = cmd;
1621 host->data_timeout = 0;
1622 if (sdhci_data_line_cmd(cmd)) {
1623 WARN_ON(host->data_cmd);
1624 host->data_cmd = cmd;
1625 sdhci_set_timeout(host, cmd);
1626 }
1627
1628 if (cmd->data) {
1629 if (host->use_external_dma)
1630 sdhci_external_dma_prepare_data(host, cmd);
1631 else
1632 sdhci_prepare_data(host, cmd);
1633 }
1634
1635 sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1636
1637 sdhci_set_transfer_mode(host, cmd);
1638
1639 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1640 WARN_ONCE(1, "Unsupported response type!\n");
1641 /*
1642 * This does not happen in practice because 136-bit response
1643 * commands never have busy waiting, so rather than complicate
1644 * the error path, just remove busy waiting and continue.
1645 */
1646 cmd->flags &= ~MMC_RSP_BUSY;
1647 }
1648
1649 if (!(cmd->flags & MMC_RSP_PRESENT))
1650 flags = SDHCI_CMD_RESP_NONE;
1651 else if (cmd->flags & MMC_RSP_136)
1652 flags = SDHCI_CMD_RESP_LONG;
1653 else if (cmd->flags & MMC_RSP_BUSY)
1654 flags = SDHCI_CMD_RESP_SHORT_BUSY;
1655 else
1656 flags = SDHCI_CMD_RESP_SHORT;
1657
1658 if (cmd->flags & MMC_RSP_CRC)
1659 flags |= SDHCI_CMD_CRC;
1660 if (cmd->flags & MMC_RSP_OPCODE)
1661 flags |= SDHCI_CMD_INDEX;
1662
1663 /* CMD19 is special in that the Data Present Select should be set */
1664 if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1665 cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1666 flags |= SDHCI_CMD_DATA;
1667
1668 timeout = jiffies;
1669 if (host->data_timeout)
1670 timeout += nsecs_to_jiffies(host->data_timeout);
1671 else if (!cmd->data && cmd->busy_timeout > 9000)
1672 timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1673 else
1674 timeout += 10 * HZ;
1675 sdhci_mod_timer(host, cmd->mrq, timeout);
1676
1677 if (host->use_external_dma)
1678 sdhci_external_dma_pre_transfer(host, cmd);
1679
1680 sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1681
1682 return true;
1683 }
1684
static bool sdhci_present_error(struct sdhci_host *host,
struct mmc_command *cmd, bool present)
1687 {
1688 if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1689 cmd->error = -ENOMEDIUM;
1690 return true;
1691 }
1692
1693 return false;
1694 }
1695
static bool sdhci_send_command_retry(struct sdhci_host *host,
struct mmc_command *cmd,
unsigned long flags)
1699 __releases(host->lock)
1700 __acquires(host->lock)
1701 {
1702 struct mmc_command *deferred_cmd = host->deferred_cmd;
1703 int timeout = 10; /* Approx. 10 ms */
1704 bool present;
1705
1706 while (!sdhci_send_command(host, cmd)) {
1707 if (!timeout--) {
1708 pr_err("%s: Controller never released inhibit bit(s).\n",
1709 mmc_hostname(host->mmc));
1710 sdhci_dumpregs(host);
1711 cmd->error = -EIO;
1712 return false;
1713 }
1714
1715 spin_unlock_irqrestore(&host->lock, flags);
1716
1717 usleep_range(1000, 1250);
1718
1719 present = host->mmc->ops->get_cd(host->mmc);
1720
1721 spin_lock_irqsave(&host->lock, flags);
1722
1723 /* A deferred command might disappear, handle that */
1724 if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1725 return true;
1726
1727 if (sdhci_present_error(host, cmd, present))
1728 return false;
1729 }
1730
1731 if (cmd == host->deferred_cmd)
1732 host->deferred_cmd = NULL;
1733
1734 return true;
1735 }
1736
static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1738 {
1739 int i, reg;
1740
1741 for (i = 0; i < 4; i++) {
1742 reg = SDHCI_RESPONSE + (3 - i) * 4;
1743 cmd->resp[i] = sdhci_readl(host, reg);
1744 }
1745
1746 if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1747 return;
1748
1749 /* CRC is stripped so we need to do some shifting */
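/*
 * Each response word is shifted up by one byte and the top byte of the
 * following word is pulled in, so resp[] ends up in the layout the MMC
 * core expects once the CRC byte is gone.
 */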
1750 for (i = 0; i < 4; i++) {
1751 cmd->resp[i] <<= 8;
1752 if (i != 3)
1753 cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1754 }
1755 }
1756
static void sdhci_finish_command(struct sdhci_host *host)
1758 {
1759 struct mmc_command *cmd = host->cmd;
1760
1761 host->cmd = NULL;
1762
1763 if (cmd->flags & MMC_RSP_PRESENT) {
1764 if (cmd->flags & MMC_RSP_136) {
1765 sdhci_read_rsp_136(host, cmd);
1766 } else {
1767 cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1768 }
1769 }
1770
1771 if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1772 mmc_command_done(host->mmc, cmd->mrq);
1773
1774 /*
* The host can send an interrupt when the busy state has
1776 * ended, allowing us to wait without wasting CPU cycles.
1777 * The busy signal uses DAT0 so this is similar to waiting
1778 * for data to complete.
1779 *
1780 * Note: The 1.0 specification is a bit ambiguous about this
1781 * feature so there might be some problems with older
1782 * controllers.
1783 */
1784 if (cmd->flags & MMC_RSP_BUSY) {
1785 if (cmd->data) {
1786 DBG("Cannot wait for busy signal when also doing a data transfer");
1787 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1788 cmd == host->data_cmd) {
1789 /* Command complete before busy is ended */
1790 return;
1791 }
1792 }
1793
1794 /* Finished CMD23, now send actual command. */
1795 if (cmd == cmd->mrq->sbc) {
1796 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1797 WARN_ON(host->deferred_cmd);
1798 host->deferred_cmd = cmd->mrq->cmd;
1799 }
1800 } else {
1801
1802 /* Processed actual command. */
1803 if (host->data && host->data_early)
1804 sdhci_finish_data(host);
1805
1806 if (!cmd->data)
1807 __sdhci_finish_mrq(host, cmd->mrq);
1808 }
1809 }
1810
static u16 sdhci_get_preset_value(struct sdhci_host *host)
1812 {
1813 u16 preset = 0;
1814
1815 switch (host->timing) {
1816 case MMC_TIMING_MMC_HS:
1817 case MMC_TIMING_SD_HS:
1818 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1819 break;
1820 case MMC_TIMING_UHS_SDR12:
1821 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1822 break;
1823 case MMC_TIMING_UHS_SDR25:
1824 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1825 break;
1826 case MMC_TIMING_UHS_SDR50:
1827 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1828 break;
1829 case MMC_TIMING_UHS_SDR104:
1830 case MMC_TIMING_MMC_HS200:
1831 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1832 break;
1833 case MMC_TIMING_UHS_DDR50:
1834 case MMC_TIMING_MMC_DDR52:
1835 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1836 break;
1837 case MMC_TIMING_MMC_HS400:
1838 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1839 break;
1840 default:
1841 pr_warn("%s: Invalid UHS-I mode selected\n",
1842 mmc_hostname(host->mmc));
1843 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1844 break;
1845 }
1846 return preset;
1847 }
1848
u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
unsigned int *actual_clock)
1851 {
1852 int div = 0; /* Initialized for compiler warning */
1853 int real_div = div, clk_mul = 1;
1854 u16 clk = 0;
1855 bool switch_base_clk = false;
1856
1857 if (host->version >= SDHCI_SPEC_300) {
1858 if (host->preset_enabled) {
1859 u16 pre_val;
1860
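/*
 * The preset field encodes the divisor directly: in programmable
 * clock mode a stored value N selects (max_clk * clk_mul) / (N + 1),
 * while in divided clock mode it selects max_clk / (2 * N), with
 * N == 0 meaning the base clock - hence the two real_div
 * calculations below.
 */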
1861 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1862 pre_val = sdhci_get_preset_value(host);
1863 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1864 if (host->clk_mul &&
1865 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1866 clk = SDHCI_PROG_CLOCK_MODE;
1867 real_div = div + 1;
1868 clk_mul = host->clk_mul;
1869 } else {
1870 real_div = max_t(int, 1, div << 1);
1871 }
1872 goto clock_set;
1873 }
1874
1875 /*
1876 * Check if the Host Controller supports Programmable Clock
1877 * Mode.
1878 */
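/*
 * In programmable clock mode SDCLK = (max_clk * clk_mul) / div and
 * the divider field holds div - 1. For example (illustrative values),
 * with a 100 MHz base clock, clk_mul = 10 and a 50 MHz request, the
 * loop below picks div = 20 and 19 is written to the register.
 */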
1879 if (host->clk_mul) {
1880 for (div = 1; div <= 1024; div++) {
1881 if ((host->max_clk * host->clk_mul / div)
1882 <= clock)
1883 break;
1884 }
1885 if ((host->max_clk * host->clk_mul / div) <= clock) {
1886 /*
1887 * Set Programmable Clock Mode in the Clock
1888 * Control register.
1889 */
1890 clk = SDHCI_PROG_CLOCK_MODE;
1891 real_div = div;
1892 clk_mul = host->clk_mul;
1893 div--;
1894 } else {
1895 /*
1896 * Divisor can be too small to reach clock
1897 * speed requirement. Then use the base clock.
1898 */
1899 switch_base_clk = true;
1900 }
1901 }
1902
1903 if (!host->clk_mul || switch_base_clk) {
1904 /* Version 3.00 divisors must be a multiple of 2. */
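/*
 * Divided clock mode: SDCLK = max_clk / div with an even div
 * (div = 1 selects the base clock) and the register holding div / 2.
 * E.g. a 100 MHz base clock and a 400 kHz request give div = 250 and
 * a register value of 125.
 */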
1905 if (host->max_clk <= clock)
1906 div = 1;
1907 else {
1908 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1909 div += 2) {
1910 if ((host->max_clk / div) <= clock)
1911 break;
1912 }
1913 }
1914 real_div = div;
1915 div >>= 1;
1916 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1917 && !div && host->max_clk <= 25000000)
1918 div = 1;
1919 }
1920 } else {
1921 /* Version 2.00 divisors must be a power of 2. */
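/*
 * SDCLK = max_clk / div with div a power of two up to 256; as above,
 * the register holds div / 2 and 0 selects the base clock.
 */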
1922 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1923 if ((host->max_clk / div) <= clock)
1924 break;
1925 }
1926 real_div = div;
1927 div >>= 1;
1928 }
1929
1930 clock_set:
1931 if (real_div)
1932 *actual_clock = (host->max_clk * clk_mul) / real_div;
1933 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1934 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1935 << SDHCI_DIVIDER_HI_SHIFT;
1936
1937 return clk;
1938 }
1939 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1940
1941 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1942 {
1943 ktime_t timeout;
1944
1945 clk |= SDHCI_CLOCK_INT_EN;
1946 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1947
1948 /* Wait max 150 ms */
1949 timeout = ktime_add_ms(ktime_get(), 150);
1950 while (1) {
1951 bool timedout = ktime_after(ktime_get(), timeout);
1952
1953 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1954 if (clk & SDHCI_CLOCK_INT_STABLE)
1955 break;
1956 if (timedout) {
1957 pr_err("%s: Internal clock never stabilised.\n",
1958 mmc_hostname(host->mmc));
1959 sdhci_dumpregs(host);
1960 return;
1961 }
1962 udelay(10);
1963 }
1964
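/*
 * SDHCI v4.10 hosts in v4 mode have a separate PLL Enable bit: once
 * the internal clock is stable, enable the PLL and wait for the
 * stable bit again before turning on the card clock below.
 */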
1965 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1966 clk |= SDHCI_CLOCK_PLL_EN;
1967 clk &= ~SDHCI_CLOCK_INT_STABLE;
1968 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1969
1970 /* Wait max 150 ms */
1971 timeout = ktime_add_ms(ktime_get(), 150);
1972 while (1) {
1973 bool timedout = ktime_after(ktime_get(), timeout);
1974
1975 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1976 if (clk & SDHCI_CLOCK_INT_STABLE)
1977 break;
1978 if (timedout) {
1979 pr_err("%s: PLL clock never stabilised.\n",
1980 mmc_hostname(host->mmc));
1981 sdhci_dumpregs(host);
1982 return;
1983 }
1984 udelay(10);
1985 }
1986 }
1987
1988 clk |= SDHCI_CLOCK_CARD_EN;
1989 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1990 }
1991 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1992
1993 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1994 {
1995 u16 clk;
1996
1997 host->mmc->actual_clock = 0;
1998
1999 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2000
2001 if (clock == 0)
2002 return;
2003
2004 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2005 sdhci_enable_clk(host, clk);
2006 }
2007 EXPORT_SYMBOL_GPL(sdhci_set_clock);
2008
2009 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2010 unsigned short vdd)
2011 {
2012 struct mmc_host *mmc = host->mmc;
2013
2014 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2015
2016 if (mode != MMC_POWER_OFF)
2017 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2018 else
2019 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2020 }
2021
2022 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2023 unsigned short vdd)
2024 {
2025 u8 pwr = 0;
2026
2027 if (mode != MMC_POWER_OFF) {
2028 switch (1 << vdd) {
2029 case MMC_VDD_165_195:
2030 /*
2031 * Without a regulator, SDHCI does not support 2.0v
2032 * so we only get here if the driver deliberately
2033 * added the 2.0v range to ocr_avail. Map it to 1.8v
2034 * for the purpose of turning on the power.
2035 */
2036 case MMC_VDD_20_21:
2037 pwr = SDHCI_POWER_180;
2038 break;
2039 case MMC_VDD_29_30:
2040 case MMC_VDD_30_31:
2041 pwr = SDHCI_POWER_300;
2042 break;
2043 case MMC_VDD_32_33:
2044 case MMC_VDD_33_34:
2045 /*
2046 * 3.4 ~ 3.6V are valid only for those platforms where it's
2047 * known that the voltage range is supported by hardware.
2048 */
2049 case MMC_VDD_34_35:
2050 case MMC_VDD_35_36:
2051 pwr = SDHCI_POWER_330;
2052 break;
2053 default:
2054 WARN(1, "%s: Invalid vdd %#x\n",
2055 mmc_hostname(host->mmc), vdd);
2056 break;
2057 }
2058 }
2059
2060 if (host->pwr == pwr)
2061 return;
2062
2063 host->pwr = pwr;
2064
2065 if (pwr == 0) {
2066 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2067 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2068 sdhci_runtime_pm_bus_off(host);
2069 } else {
2070 /*
2071 * Spec says that we should clear the power reg before setting
2072 * a new value. Some controllers don't seem to like this though.
2073 */
2074 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2075 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2076
2077 /*
2078 * At least the Marvell CaFe chip gets confused if we set the
2079 * voltage and turn on the power at the same time, so set the
2080 * voltage first.
2081 */
2082 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2083 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2084
2085 pwr |= SDHCI_POWER_ON;
2086
2087 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2088
2089 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2090 sdhci_runtime_pm_bus_on(host);
2091
2092 /*
2093 * Some controllers need an extra 10ms delay before
2094 * they can apply clock after applying power
2095 */
2096 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2097 mdelay(10);
2098 }
2099 }
2100 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2101
2102 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2103 unsigned short vdd)
2104 {
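/*
 * Prefer the external vmmc regulator when one was provided;
 * otherwise drive the power control register directly.
 */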
2105 if (IS_ERR(host->mmc->supply.vmmc))
2106 sdhci_set_power_noreg(host, mode, vdd);
2107 else
2108 sdhci_set_power_reg(host, mode, vdd);
2109 }
2110 EXPORT_SYMBOL_GPL(sdhci_set_power);
2111
2112 /*
2113 * Some controllers need to configure a valid bus voltage on their power
2114 * register regardless of whether an external regulator is taking care of power
2115 * supply. This helper function takes care of it if set as the controller's
2116 * sdhci_ops.set_power callback.
2117 */
2118 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2119 unsigned char mode,
2120 unsigned short vdd)
2121 {
2122 if (!IS_ERR(host->mmc->supply.vmmc)) {
2123 struct mmc_host *mmc = host->mmc;
2124
2125 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2126 }
2127 sdhci_set_power_noreg(host, mode, vdd);
2128 }
2129 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
2130
2131 /*****************************************************************************\
2132 * *
2133 * MMC callbacks *
2134 * *
2135 \*****************************************************************************/
2136
2137 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2138 {
2139 struct sdhci_host *host = mmc_priv(mmc);
2140 struct mmc_command *cmd;
2141 unsigned long flags;
2142 bool present;
2143
2144 /* Firstly check card presence */
2145 present = mmc->ops->get_cd(mmc);
2146
2147 spin_lock_irqsave(&host->lock, flags);
2148
2149 sdhci_led_activate(host);
2150
2151 if (sdhci_present_error(host, mrq->cmd, present))
2152 goto out_finish;
2153
2154 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2155
2156 if (!sdhci_send_command_retry(host, cmd, flags))
2157 goto out_finish;
2158
2159 spin_unlock_irqrestore(&host->lock, flags);
2160
2161 return;
2162
2163 out_finish:
2164 sdhci_finish_mrq(host, mrq);
2165 spin_unlock_irqrestore(&host->lock, flags);
2166 }
2167 EXPORT_SYMBOL_GPL(sdhci_request);
2168
2169 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2170 {
2171 struct sdhci_host *host = mmc_priv(mmc);
2172 struct mmc_command *cmd;
2173 unsigned long flags;
2174 int ret = 0;
2175
2176 spin_lock_irqsave(&host->lock, flags);
2177
2178 if (sdhci_present_error(host, mrq->cmd, true)) {
2179 sdhci_finish_mrq(host, mrq);
2180 goto out_finish;
2181 }
2182
2183 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2184
2185 /*
2186 * The HSQ may send a command in interrupt context without polling
2187 * the busy signaling, which means we should return -EBUSY if the
2188 * controller has not yet released the inhibit bits, so that the HSQ
2189 * can retry the request in non-atomic context. Hence we must not
2190 * finish this request here.
2191 */
2192 if (!sdhci_send_command(host, cmd))
2193 ret = -EBUSY;
2194 else
2195 sdhci_led_activate(host);
2196
2197 out_finish:
2198 spin_unlock_irqrestore(&host->lock, flags);
2199 return ret;
2200 }
2201 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2202
2203 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2204 {
2205 u8 ctrl;
2206
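/*
 * Bus width is selected via two Host Control bits: 8BITBUS for
 * 8-bit mode and 4BITBUS for 4-bit mode; with both clear the bus
 * runs 1 bit wide. When leaving 8-bit mode, 8BITBUS is cleared only
 * on hosts that advertise 8-bit capability.
 */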
2207 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2208 if (width == MMC_BUS_WIDTH_8) {
2209 ctrl &= ~SDHCI_CTRL_4BITBUS;
2210 ctrl |= SDHCI_CTRL_8BITBUS;
2211 } else {
2212 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2213 ctrl &= ~SDHCI_CTRL_8BITBUS;
2214 if (width == MMC_BUS_WIDTH_4)
2215 ctrl |= SDHCI_CTRL_4BITBUS;
2216 else
2217 ctrl &= ~SDHCI_CTRL_4BITBUS;
2218 }
2219 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2220 }
2221 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2222
2223 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2224 {
2225 u16 ctrl_2;
2226
2227 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2228 /* Select Bus Speed Mode for host */
2229 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2230 if ((timing == MMC_TIMING_MMC_HS200) ||
2231 (timing == MMC_TIMING_UHS_SDR104))
2232 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2233 else if (timing == MMC_TIMING_UHS_SDR12)
2234 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2235 else if (timing == MMC_TIMING_UHS_SDR25)
2236 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2237 else if (timing == MMC_TIMING_UHS_SDR50)
2238 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2239 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2240 (timing == MMC_TIMING_MMC_DDR52))
2241 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2242 else if (timing == MMC_TIMING_MMC_HS400)
2243 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2244 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2245 }
2246 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2247
2248 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2249 {
2250 struct sdhci_host *host = mmc_priv(mmc);
2251 u8 ctrl;
2252
2253 if (ios->power_mode == MMC_POWER_UNDEFINED)
2254 return;
2255
2256 if (host->flags & SDHCI_DEVICE_DEAD) {
2257 if (!IS_ERR(mmc->supply.vmmc) &&
2258 ios->power_mode == MMC_POWER_OFF)
2259 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2260 return;
2261 }
2262
2263 /*
2264 * Reset the chip on each power off.
2265 * Should clear out any weird states.
2266 */
2267 if (ios->power_mode == MMC_POWER_OFF) {
2268 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2269 sdhci_reinit(host);
2270 }
2271
2272 if (host->version >= SDHCI_SPEC_300 &&
2273 (ios->power_mode == MMC_POWER_UP) &&
2274 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2275 sdhci_enable_preset_value(host, false);
2276
2277 if (!ios->clock || ios->clock != host->clock) {
2278 host->ops->set_clock(host, ios->clock);
2279 host->clock = ios->clock;
2280
2281 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2282 host->clock) {
2283 host->timeout_clk = mmc->actual_clock ?
2284 mmc->actual_clock / 1000 :
2285 host->clock / 1000;
2286 mmc->max_busy_timeout =
2287 host->ops->get_max_timeout_count ?
2288 host->ops->get_max_timeout_count(host) :
2289 1 << 27;
2290 mmc->max_busy_timeout /= host->timeout_clk;
2291 }
2292 }
2293
2294 if (host->ops->set_power)
2295 host->ops->set_power(host, ios->power_mode, ios->vdd);
2296 else
2297 sdhci_set_power(host, ios->power_mode, ios->vdd);
2298
2299 if (host->ops->platform_send_init_74_clocks)
2300 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2301
2302 host->ops->set_bus_width(host, ios->bus_width);
2303
2304 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2305
2306 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2307 if (ios->timing == MMC_TIMING_SD_HS ||
2308 ios->timing == MMC_TIMING_MMC_HS ||
2309 ios->timing == MMC_TIMING_MMC_HS400 ||
2310 ios->timing == MMC_TIMING_MMC_HS200 ||
2311 ios->timing == MMC_TIMING_MMC_DDR52 ||
2312 ios->timing == MMC_TIMING_UHS_SDR50 ||
2313 ios->timing == MMC_TIMING_UHS_SDR104 ||
2314 ios->timing == MMC_TIMING_UHS_DDR50 ||
2315 ios->timing == MMC_TIMING_UHS_SDR25)
2316 ctrl |= SDHCI_CTRL_HISPD;
2317 else
2318 ctrl &= ~SDHCI_CTRL_HISPD;
2319 }
2320
2321 if (host->version >= SDHCI_SPEC_300) {
2322 u16 clk, ctrl_2;
2323
2324 if (!host->preset_enabled) {
2325 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2326 /*
2327 * We only need to set Driver Strength if the
2328 * preset value enable is not set.
2329 */
2330 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2331 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2332 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2333 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2334 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2335 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2336 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2337 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2338 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2339 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2340 else {
2341 pr_warn("%s: invalid driver type, default to driver type B\n",
2342 mmc_hostname(mmc));
2343 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2344 }
2345
2346 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2347 } else {
2348 /*
2349 * According to SDHC Spec v3.00, if the Preset Value
2350 * Enable in the Host Control 2 register is set, we
2351 * need to reset SD Clock Enable before changing High
2352 * Speed Enable to avoid generating clock glitches.
2353 */
2354
2355 /* Reset SD Clock Enable */
2356 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2357 clk &= ~SDHCI_CLOCK_CARD_EN;
2358 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2359
2360 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2361
2362 /* Re-enable SD Clock */
2363 host->ops->set_clock(host, host->clock);
2364 }
2365
2366 /* Reset SD Clock Enable */
2367 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2368 clk &= ~SDHCI_CLOCK_CARD_EN;
2369 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2370
2371 host->ops->set_uhs_signaling(host, ios->timing);
2372 host->timing = ios->timing;
2373
2374 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2375 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2376 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2377 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2378 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2379 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2380 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2381 u16 preset;
2382
2383 sdhci_enable_preset_value(host, true);
2384 preset = sdhci_get_preset_value(host);
2385 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2386 preset);
2387 }
2388
2389 /* Re-enable SD Clock */
2390 host->ops->set_clock(host, host->clock);
2391 } else
2392 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2393
2394 /*
2395 * Some (ENE) controllers go apeshit on some ios operation,
2396 * signalling timeout and CRC errors even on CMD0. Resetting
2397 * it on each ios seems to solve the problem.
2398 */
2399 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2400 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2401 }
2402 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2403
2404 static int sdhci_get_cd(struct mmc_host *mmc)
2405 {
2406 struct sdhci_host *host = mmc_priv(mmc);
2407 int gpio_cd = mmc_gpio_get_cd(mmc);
2408
2409 if (host->flags & SDHCI_DEVICE_DEAD)
2410 return 0;
2411
2412 /* If nonremovable, assume that the card is always present. */
2413 if (!mmc_card_is_removable(mmc))
2414 return 1;
2415
2416 /*
2417 * Try slot gpio detect; if defined, it takes precedence
2418 * over the built-in controller functionality.
2419 */
2420 if (gpio_cd >= 0)
2421 return !!gpio_cd;
2422
2423 /* If polling, assume that the card is always present. */
2424 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2425 return 1;
2426
2427 /* Host native card detect */
2428 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2429 }
2430
2431 static int sdhci_check_ro(struct sdhci_host *host)
2432 {
2433 unsigned long flags;
2434 int is_readonly;
2435
2436 spin_lock_irqsave(&host->lock, flags);
2437
2438 if (host->flags & SDHCI_DEVICE_DEAD)
2439 is_readonly = 0;
2440 else if (host->ops->get_ro)
2441 is_readonly = host->ops->get_ro(host);
2442 else if (mmc_can_gpio_ro(host->mmc))
2443 is_readonly = mmc_gpio_get_ro(host->mmc);
2444 else
2445 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2446 & SDHCI_WRITE_PROTECT);
2447
2448 spin_unlock_irqrestore(&host->lock, flags);
2449
2450 /* This quirk needs to be replaced by a callback function later */
2451 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2452 !is_readonly : is_readonly;
2453 }
2454
2455 #define SAMPLE_COUNT 5
2456
2457 static int sdhci_get_ro(struct mmc_host *mmc)
2458 {
2459 struct sdhci_host *host = mmc_priv(mmc);
2460 int i, ro_count;
2461
2462 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2463 return sdhci_check_ro(host);
2464
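/*
 * Unstable RO detection: sample the switch SAMPLE_COUNT times,
 * 30 ms apart, and report read-only only if more than half of the
 * samples say so.
 */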
2465 ro_count = 0;
2466 for (i = 0; i < SAMPLE_COUNT; i++) {
2467 if (sdhci_check_ro(host)) {
2468 if (++ro_count > SAMPLE_COUNT / 2)
2469 return 1;
2470 }
2471 msleep(30);
2472 }
2473 return 0;
2474 }
2475
2476 static void sdhci_hw_reset(struct mmc_host *mmc)
2477 {
2478 struct sdhci_host *host = mmc_priv(mmc);
2479
2480 if (host->ops && host->ops->hw_reset)
2481 host->ops->hw_reset(host);
2482 }
2483
2484 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2485 {
2486 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2487 if (enable)
2488 host->ier |= SDHCI_INT_CARD_INT;
2489 else
2490 host->ier &= ~SDHCI_INT_CARD_INT;
2491
2492 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2493 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2494 }
2495 }
2496
2497 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2498 {
2499 struct sdhci_host *host = mmc_priv(mmc);
2500 unsigned long flags;
2501
2502 if (enable)
2503 pm_runtime_get_noresume(mmc_dev(mmc));
2504
2505 spin_lock_irqsave(&host->lock, flags);
2506 sdhci_enable_sdio_irq_nolock(host, enable);
2507 spin_unlock_irqrestore(&host->lock, flags);
2508
2509 if (!enable)
2510 pm_runtime_put_noidle(mmc_dev(mmc));
2511 }
2512 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2513
2514 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2515 {
2516 struct sdhci_host *host = mmc_priv(mmc);
2517 unsigned long flags;
2518
2519 spin_lock_irqsave(&host->lock, flags);
2520 sdhci_enable_sdio_irq_nolock(host, true);
2521 spin_unlock_irqrestore(&host->lock, flags);
2522 }
2523
2524 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2525 struct mmc_ios *ios)
2526 {
2527 struct sdhci_host *host = mmc_priv(mmc);
2528 u16 ctrl;
2529 int ret;
2530
2531 /*
2532 * Signal Voltage Switching is only applicable for Host Controllers
2533 * v3.00 and above.
2534 */
2535 if (host->version < SDHCI_SPEC_300)
2536 return 0;
2537
2538 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2539
2540 switch (ios->signal_voltage) {
2541 case MMC_SIGNAL_VOLTAGE_330:
2542 if (!(host->flags & SDHCI_SIGNALING_330))
2543 return -EINVAL;
2544 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2545 ctrl &= ~SDHCI_CTRL_VDD_180;
2546 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2547
2548 if (!IS_ERR(mmc->supply.vqmmc)) {
2549 ret = mmc_regulator_set_vqmmc(mmc, ios);
2550 if (ret < 0) {
2551 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2552 mmc_hostname(mmc));
2553 return -EIO;
2554 }
2555 }
2556 /* Wait for 5ms */
2557 usleep_range(5000, 5500);
2558
2559 /* 3.3V regulator output should be stable within 5 ms */
2560 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2561 if (!(ctrl & SDHCI_CTRL_VDD_180))
2562 return 0;
2563
2564 pr_warn("%s: 3.3V regulator output did not become stable\n",
2565 mmc_hostname(mmc));
2566
2567 return -EAGAIN;
2568 case MMC_SIGNAL_VOLTAGE_180:
2569 if (!(host->flags & SDHCI_SIGNALING_180))
2570 return -EINVAL;
2571 if (!IS_ERR(mmc->supply.vqmmc)) {
2572 ret = mmc_regulator_set_vqmmc(mmc, ios);
2573 if (ret < 0) {
2574 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2575 mmc_hostname(mmc));
2576 return -EIO;
2577 }
2578 }
2579
2580 /*
2581 * Enable 1.8V Signal Enable in the Host Control2
2582 * register
2583 */
2584 ctrl |= SDHCI_CTRL_VDD_180;
2585 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2586
2587 /* Some controllers need to do more when switching */
2588 if (host->ops->voltage_switch)
2589 host->ops->voltage_switch(host);
2590
2591 /* 1.8V regulator output should be stable within 5 ms */
2592 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2593 if (ctrl & SDHCI_CTRL_VDD_180)
2594 return 0;
2595
2596 pr_warn("%s: 1.8V regulator output did not become stable\n",
2597 mmc_hostname(mmc));
2598
2599 return -EAGAIN;
2600 case MMC_SIGNAL_VOLTAGE_120:
2601 if (!(host->flags & SDHCI_SIGNALING_120))
2602 return -EINVAL;
2603 if (!IS_ERR(mmc->supply.vqmmc)) {
2604 ret = mmc_regulator_set_vqmmc(mmc, ios);
2605 if (ret < 0) {
2606 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2607 mmc_hostname(mmc));
2608 return -EIO;
2609 }
2610 }
2611 return 0;
2612 default:
2613 /* No signal voltage switch required */
2614 return 0;
2615 }
2616 }
2617 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2618
2619 static int sdhci_card_busy(struct mmc_host *mmc)
2620 {
2621 struct sdhci_host *host = mmc_priv(mmc);
2622 u32 present_state;
2623
2624 /* Check whether DAT[0] is 0 */
2625 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2626
2627 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2628 }
2629
2630 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2631 {
2632 struct sdhci_host *host = mmc_priv(mmc);
2633 unsigned long flags;
2634
2635 spin_lock_irqsave(&host->lock, flags);
2636 host->flags |= SDHCI_HS400_TUNING;
2637 spin_unlock_irqrestore(&host->lock, flags);
2638
2639 return 0;
2640 }
2641
2642 void sdhci_start_tuning(struct sdhci_host *host)
2643 {
2644 u16 ctrl;
2645
2646 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2647 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2648 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2649 ctrl |= SDHCI_CTRL_TUNED_CLK;
2650 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2651
2652 /*
2653 * As per the Host Controller spec v3.00, the tuning command
2654 * generates a Buffer Read Ready interrupt, so enable that.
2655 *
2656 * Note: The spec clearly says that when tuning sequence
2657 * is being performed, the controller does not generate
2658 * interrupts other than Buffer Read Ready interrupt. But
2659 * to make sure we don't hit a controller bug, we _only_
2660 * enable Buffer Read Ready interrupt here.
2661 */
2662 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2663 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2664 }
2665 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2666
2667 void sdhci_end_tuning(struct sdhci_host *host)
2668 {
2669 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2670 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2671 }
2672 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2673
2674 void sdhci_reset_tuning(struct sdhci_host *host)
2675 {
2676 u16 ctrl;
2677
2678 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2679 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2680 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2681 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2682 }
2683 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2684
2685 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2686 {
2687 sdhci_reset_tuning(host);
2688
2689 sdhci_do_reset(host, SDHCI_RESET_CMD);
2690 sdhci_do_reset(host, SDHCI_RESET_DATA);
2691
2692 sdhci_end_tuning(host);
2693
2694 mmc_send_abort_tuning(host->mmc, opcode);
2695 }
2696 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2697
2698 /*
2699 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2700 * tuning command does not have a data payload (or rather the hardware does it
2701 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2702 * interrupt setup is different from other commands and there is no timeout
2703 * interrupt so special handling is needed.
2704 */
2705 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2706 {
2707 struct mmc_host *mmc = host->mmc;
2708 struct mmc_command cmd = {};
2709 struct mmc_request mrq = {};
2710 unsigned long flags;
2711 u32 b = host->sdma_boundary;
2712
2713 spin_lock_irqsave(&host->lock, flags);
2714
2715 cmd.opcode = opcode;
2716 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2717 cmd.mrq = &mrq;
2718
2719 mrq.cmd = &cmd;
2720 /*
2721 * In response to CMD19 the card sends a 64-byte tuning block to
2722 * the Host Controller; for CMD21 on an 8-bit bus it sends a
2723 * 128-byte block. Set the block size accordingly.
2724 */
2725 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2726 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2727 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2728 else
2729 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2730
2731 /*
2732 * The tuning block is sent by the card to the host controller.
2733 * So we set the TRNS_READ bit in the Transfer Mode register.
2734 * This also takes care of setting DMA Enable and Multi Block
2735 * Select in the same register to 0.
2736 */
2737 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2738
2739 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2740 spin_unlock_irqrestore(&host->lock, flags);
2741 host->tuning_done = 0;
2742 return;
2743 }
2744
2745 host->cmd = NULL;
2746
2747 sdhci_del_timer(host, &mrq);
2748
2749 host->tuning_done = 0;
2750
2751 spin_unlock_irqrestore(&host->lock, flags);
2752
2753 /* Wait for Buffer Read Ready interrupt */
2754 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2755 msecs_to_jiffies(50));
2756
2757 }
2758 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2759
2760 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2761 {
2762 int i;
2763
2764 /*
2765 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
2766 * of loops reaches tuning loop count.
2767 */
2768 for (i = 0; i < host->tuning_loop_count; i++) {
2769 u16 ctrl;
2770
2771 sdhci_send_tuning(host, opcode);
2772
2773 if (!host->tuning_done) {
2774 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2775 mmc_hostname(host->mmc));
2776 sdhci_abort_tuning(host, opcode);
2777 return -ETIMEDOUT;
2778 }
2779
2780 /* Spec does not require a delay between tuning cycles */
2781 if (host->tuning_delay > 0)
2782 mdelay(host->tuning_delay);
2783
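/*
 * The controller clears Execute Tuning once the sequence has
 * completed; Tuned Clock set at that point means the tuning
 * succeeded, otherwise it failed.
 */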
2784 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2785 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2786 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2787 return 0; /* Success! */
2788 break;
2789 }
2790
2791 }
2792
2793 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2794 mmc_hostname(host->mmc));
2795 sdhci_reset_tuning(host);
2796 return -EAGAIN;
2797 }
2798
2799 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2800 {
2801 struct sdhci_host *host = mmc_priv(mmc);
2802 int err = 0;
2803 unsigned int tuning_count = 0;
2804 bool hs400_tuning;
2805
2806 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2807
2808 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2809 tuning_count = host->tuning_count;
2810
2811 /*
2812 * The Host Controller needs tuning in case of SDR104 and DDR50
2813 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2814 * the Capabilities register.
2815 * If the Host Controller supports the HS200 mode then the
2816 * tuning function has to be executed.
2817 */
2818 switch (host->timing) {
2819 /* HS400 tuning is done in HS200 mode */
2820 case MMC_TIMING_MMC_HS400:
2821 err = -EINVAL;
2822 goto out;
2823
2824 case MMC_TIMING_MMC_HS200:
2825 /*
2826 * Periodic re-tuning for HS400 is not expected to be needed, so
2827 * disable it here.
2828 */
2829 if (hs400_tuning)
2830 tuning_count = 0;
2831 break;
2832
2833 case MMC_TIMING_UHS_SDR104:
2834 case MMC_TIMING_UHS_DDR50:
2835 break;
2836
2837 case MMC_TIMING_UHS_SDR50:
2838 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2839 break;
2840 fallthrough;
2841
2842 default:
2843 goto out;
2844 }
2845
2846 if (host->ops->platform_execute_tuning) {
2847 err = host->ops->platform_execute_tuning(host, opcode);
2848 goto out;
2849 }
2850
2851 mmc->retune_period = tuning_count;
2852
2853 if (host->tuning_delay < 0)
2854 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2855
2856 sdhci_start_tuning(host);
2857
2858 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2859
2860 sdhci_end_tuning(host);
2861 out:
2862 host->flags &= ~SDHCI_HS400_TUNING;
2863
2864 return err;
2865 }
2866 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2867
2868 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2869 {
2870 /* Host Controller v3.00 defines preset value registers */
2871 if (host->version < SDHCI_SPEC_300)
2872 return;
2873
2874 /*
2875 * Only change the Preset Value state when it differs from the
2876 * requested state; otherwise, bail out.
2877 */
2878 if (host->preset_enabled != enable) {
2879 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2880
2881 if (enable)
2882 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2883 else
2884 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2885
2886 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2887
2888 if (enable)
2889 host->flags |= SDHCI_PV_ENABLED;
2890 else
2891 host->flags &= ~SDHCI_PV_ENABLED;
2892
2893 host->preset_enabled = enable;
2894 }
2895 }
2896
2897 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2898 int err)
2899 {
2900 struct mmc_data *data = mrq->data;
2901
2902 if (data->host_cookie != COOKIE_UNMAPPED)
2903 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2904 mmc_get_dma_dir(data));
2905
2906 data->host_cookie = COOKIE_UNMAPPED;
2907 }
2908
2909 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2910 {
2911 struct sdhci_host *host = mmc_priv(mmc);
2912
2913 mrq->data->host_cookie = COOKIE_UNMAPPED;
2914
2915 /*
2916 * Do no pre-mapping in the pre hook if we're using the bounce
2917 * buffer: that would require two bounce buffers, since one buffer
2918 * is still in flight when this is called.
2919 */
2920 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2921 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2922 }
2923
2924 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2925 {
2926 if (host->data_cmd) {
2927 host->data_cmd->error = err;
2928 sdhci_finish_mrq(host, host->data_cmd->mrq);
2929 }
2930
2931 if (host->cmd) {
2932 host->cmd->error = err;
2933 sdhci_finish_mrq(host, host->cmd->mrq);
2934 }
2935 }
2936
2937 static void sdhci_card_event(struct mmc_host *mmc)
2938 {
2939 struct sdhci_host *host = mmc_priv(mmc);
2940 unsigned long flags;
2941 int present;
2942
2943 /* First check if client has provided their own card event */
2944 if (host->ops->card_event)
2945 host->ops->card_event(host);
2946
2947 present = mmc->ops->get_cd(mmc);
2948
2949 spin_lock_irqsave(&host->lock, flags);
2950
2951 /* Check sdhci_has_requests() first in case we are runtime suspended */
2952 if (sdhci_has_requests(host) && !present) {
2953 pr_err("%s: Card removed during transfer!\n",
2954 mmc_hostname(mmc));
2955 pr_err("%s: Resetting controller.\n",
2956 mmc_hostname(mmc));
2957
2958 sdhci_do_reset(host, SDHCI_RESET_CMD);
2959 sdhci_do_reset(host, SDHCI_RESET_DATA);
2960
2961 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2962 }
2963
2964 spin_unlock_irqrestore(&host->lock, flags);
2965 }
2966
2967 static const struct mmc_host_ops sdhci_ops = {
2968 .request = sdhci_request,
2969 .post_req = sdhci_post_req,
2970 .pre_req = sdhci_pre_req,
2971 .set_ios = sdhci_set_ios,
2972 .get_cd = sdhci_get_cd,
2973 .get_ro = sdhci_get_ro,
2974 .hw_reset = sdhci_hw_reset,
2975 .enable_sdio_irq = sdhci_enable_sdio_irq,
2976 .ack_sdio_irq = sdhci_ack_sdio_irq,
2977 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2978 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2979 .execute_tuning = sdhci_execute_tuning,
2980 .card_event = sdhci_card_event,
2981 .card_busy = sdhci_card_busy,
2982 };
2983
2984 /*****************************************************************************\
2985 * *
2986 * Request done *
2987 * *
2988 \*****************************************************************************/
2989
2990 static bool sdhci_request_done(struct sdhci_host *host)
2991 {
2992 unsigned long flags;
2993 struct mmc_request *mrq;
2994 int i;
2995
2996 spin_lock_irqsave(&host->lock, flags);
2997
2998 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2999 mrq = host->mrqs_done[i];
3000 if (mrq)
3001 break;
3002 }
3003
3004 if (!mrq) {
3005 spin_unlock_irqrestore(&host->lock, flags);
3006 return true;
3007 }
3008
3009 /*
3010 * The controller needs a reset of internal state machines
3011 * upon error conditions.
3012 */
3013 if (sdhci_needs_reset(host, mrq)) {
3014 /*
3015 * Do not finish until command and data lines are available for
3016 * reset. Note there can only be one other mrq, so it cannot
3017 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3018 * would both be null.
3019 */
3020 if (host->cmd || host->data_cmd) {
3021 spin_unlock_irqrestore(&host->lock, flags);
3022 return true;
3023 }
3024
3025 /* Some controllers need this kick or reset won't work here */
3026 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3027 /* This is to force an update */
3028 host->ops->set_clock(host, host->clock);
3029
3030 /*
3031 * Spec says we should do both at the same time, but Ricoh
3032 * controllers do not like that.
3033 */
3034 sdhci_do_reset(host, SDHCI_RESET_CMD);
3035 sdhci_do_reset(host, SDHCI_RESET_DATA);
3036
3037 host->pending_reset = false;
3038 }
3039
3040 /*
3041 * Always unmap the data buffers if they were mapped by
3042 * sdhci_prepare_data() whenever we finish with a request.
3043 * This avoids leaking DMA mappings on error.
3044 */
3045 if (host->flags & SDHCI_REQ_USE_DMA) {
3046 struct mmc_data *data = mrq->data;
3047
3048 if (host->use_external_dma && data &&
3049 (mrq->cmd->error || data->error)) {
3050 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3051
3052 host->mrqs_done[i] = NULL;
3053 spin_unlock_irqrestore(&host->lock, flags);
3054 dmaengine_terminate_sync(chan);
3055 spin_lock_irqsave(&host->lock, flags);
3056 sdhci_set_mrq_done(host, mrq);
3057 }
3058
3059 if (data && data->host_cookie == COOKIE_MAPPED) {
3060 if (host->bounce_buffer) {
3061 /*
3062 * On reads, copy the bounced data into the
3063 * sglist
3064 */
3065 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3066 unsigned int length = data->bytes_xfered;
3067
3068 if (length > host->bounce_buffer_size) {
3069 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3070 mmc_hostname(host->mmc),
3071 host->bounce_buffer_size,
3072 data->bytes_xfered);
3073 /* Cap it down and continue */
3074 length = host->bounce_buffer_size;
3075 }
3076 dma_sync_single_for_cpu(
3077 mmc_dev(host->mmc),
3078 host->bounce_addr,
3079 host->bounce_buffer_size,
3080 DMA_FROM_DEVICE);
3081 sg_copy_from_buffer(data->sg,
3082 data->sg_len,
3083 host->bounce_buffer,
3084 length);
3085 } else {
3086 /* No copying, just switch ownership */
3087 dma_sync_single_for_cpu(
3088 mmc_dev(host->mmc),
3089 host->bounce_addr,
3090 host->bounce_buffer_size,
3091 mmc_get_dma_dir(data));
3092 }
3093 } else {
3094 /* Unmap the raw data */
3095 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3096 data->sg_len,
3097 mmc_get_dma_dir(data));
3098 }
3099 data->host_cookie = COOKIE_UNMAPPED;
3100 }
3101 }
3102
3103 host->mrqs_done[i] = NULL;
3104
3105 spin_unlock_irqrestore(&host->lock, flags);
3106
3107 if (host->ops->request_done)
3108 host->ops->request_done(host, mrq);
3109 else
3110 mmc_request_done(host->mmc, mrq);
3111
3112 return false;
3113 }
3114
3115 static void sdhci_complete_work(struct work_struct *work)
3116 {
3117 struct sdhci_host *host = container_of(work, struct sdhci_host,
3118 complete_work);
3119
3120 while (!sdhci_request_done(host))
3121 ;
3122 }
3123
3124 static void sdhci_timeout_timer(struct timer_list *t)
3125 {
3126 struct sdhci_host *host;
3127 unsigned long flags;
3128
3129 host = from_timer(host, t, timer);
3130
3131 spin_lock_irqsave(&host->lock, flags);
3132
3133 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3134 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3135 mmc_hostname(host->mmc));
3136 sdhci_dumpregs(host);
3137
3138 host->cmd->error = -ETIMEDOUT;
3139 sdhci_finish_mrq(host, host->cmd->mrq);
3140 }
3141
3142 spin_unlock_irqrestore(&host->lock, flags);
3143 }
3144
3145 static void sdhci_timeout_data_timer(struct timer_list *t)
3146 {
3147 struct sdhci_host *host;
3148 unsigned long flags;
3149
3150 host = from_timer(host, t, data_timer);
3151
3152 spin_lock_irqsave(&host->lock, flags);
3153
3154 if (host->data || host->data_cmd ||
3155 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3156 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3157 mmc_hostname(host->mmc));
3158 sdhci_dumpregs(host);
3159
3160 if (host->data) {
3161 host->data->error = -ETIMEDOUT;
3162 __sdhci_finish_data(host, true);
3163 queue_work(host->complete_wq, &host->complete_work);
3164 } else if (host->data_cmd) {
3165 host->data_cmd->error = -ETIMEDOUT;
3166 sdhci_finish_mrq(host, host->data_cmd->mrq);
3167 } else {
3168 host->cmd->error = -ETIMEDOUT;
3169 sdhci_finish_mrq(host, host->cmd->mrq);
3170 }
3171 }
3172
3173 spin_unlock_irqrestore(&host->lock, flags);
3174 }
3175
3176 /*****************************************************************************\
3177 * *
3178 * Interrupt handling *
3179 * *
3180 \*****************************************************************************/
3181
3182 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3183 {
3184 /* Handle auto-CMD12 error */
3185 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3186 struct mmc_request *mrq = host->data_cmd->mrq;
3187 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3188 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3189 SDHCI_INT_DATA_TIMEOUT :
3190 SDHCI_INT_DATA_CRC;
3191
3192 /* Treat auto-CMD12 error the same as data error */
3193 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3194 *intmask_p |= data_err_bit;
3195 return;
3196 }
3197 }
3198
3199 if (!host->cmd) {
3200 /*
3201 * SDHCI recovers from errors by resetting the cmd and data
3202 * circuits. Until that is done, there very well might be more
3203 * interrupts, so ignore them in that case.
3204 */
3205 if (host->pending_reset)
3206 return;
3207 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3208 mmc_hostname(host->mmc), (unsigned)intmask);
3209 sdhci_dumpregs(host);
3210 return;
3211 }
3212
3213 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3214 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3215 if (intmask & SDHCI_INT_TIMEOUT)
3216 host->cmd->error = -ETIMEDOUT;
3217 else
3218 host->cmd->error = -EILSEQ;
3219
3220 /* Treat data command CRC error the same as data CRC error */
3221 if (host->cmd->data &&
3222 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3223 SDHCI_INT_CRC) {
3224 host->cmd = NULL;
3225 *intmask_p |= SDHCI_INT_DATA_CRC;
3226 return;
3227 }
3228
3229 __sdhci_finish_mrq(host, host->cmd->mrq);
3230 return;
3231 }
3232
3233 /* Handle auto-CMD23 error */
3234 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3235 struct mmc_request *mrq = host->cmd->mrq;
3236 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3237 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3238 -ETIMEDOUT :
3239 -EILSEQ;
3240
3241 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3242 mrq->sbc->error = err;
3243 __sdhci_finish_mrq(host, mrq);
3244 return;
3245 }
3246 }
3247
3248 if (intmask & SDHCI_INT_RESPONSE)
3249 sdhci_finish_command(host);
3250 }
3251
3252 static void sdhci_adma_show_error(struct sdhci_host *host)
3253 {
3254 void *desc = host->adma_table;
3255 dma_addr_t dma = host->adma_addr;
3256
3257 sdhci_dumpregs(host);
3258
3259 while (true) {
3260 struct sdhci_adma2_64_desc *dma_desc = desc;
3261
3262 if (host->flags & SDHCI_USE_64_BIT_DMA)
3263 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3264 (unsigned long long)dma,
3265 le32_to_cpu(dma_desc->addr_hi),
3266 le32_to_cpu(dma_desc->addr_lo),
3267 le16_to_cpu(dma_desc->len),
3268 le16_to_cpu(dma_desc->cmd));
3269 else
3270 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3271 (unsigned long long)dma,
3272 le32_to_cpu(dma_desc->addr_lo),
3273 le16_to_cpu(dma_desc->len),
3274 le16_to_cpu(dma_desc->cmd));
3275
3276 desc += host->desc_sz;
3277 dma += host->desc_sz;
3278
3279 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3280 break;
3281 }
3282 }
3283
3284 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3285 {
3286 u32 command;
3287
3288 /*
3289 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3290 * sdhci_send_tuning() is used.
3291 * We must exclude the case of PIO mode with mmc_send_tuning():
3292 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL
3293 * stays asserted, and we end up stuck in an interrupt storm.
3294 */
3295 if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3296 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3297 if (command == MMC_SEND_TUNING_BLOCK ||
3298 command == MMC_SEND_TUNING_BLOCK_HS200) {
3299 host->tuning_done = 1;
3300 wake_up(&host->buf_ready_int);
3301 return;
3302 }
3303 }
3304
3305 if (!host->data) {
3306 struct mmc_command *data_cmd = host->data_cmd;
3307
3308 /*
3309 * The "data complete" interrupt is also used to
3310 * indicate that a busy state has ended. See comment
3311 * above in sdhci_cmd_irq().
3312 */
3313 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3314 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3315 host->data_cmd = NULL;
3316 data_cmd->error = -ETIMEDOUT;
3317 __sdhci_finish_mrq(host, data_cmd->mrq);
3318 return;
3319 }
3320 if (intmask & SDHCI_INT_DATA_END) {
3321 host->data_cmd = NULL;
3322 /*
3323 * Some cards handle busy-end interrupt
3324 * before the command completed, so make
3325 * sure we do things in the proper order.
3326 */
3327 if (host->cmd == data_cmd)
3328 return;
3329
3330 __sdhci_finish_mrq(host, data_cmd->mrq);
3331 return;
3332 }
3333 }
3334
3335 /*
3336 * SDHCI recovers from errors by resetting the cmd and data
3337 * circuits. Until that is done, there very well might be more
3338 * interrupts, so ignore them in that case.
3339 */
3340 if (host->pending_reset)
3341 return;
3342
3343 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3344 mmc_hostname(host->mmc), (unsigned)intmask);
3345 sdhci_dumpregs(host);
3346
3347 return;
3348 }
3349
3350 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3351 host->data->error = -ETIMEDOUT;
3352 else if (intmask & SDHCI_INT_DATA_END_BIT)
3353 host->data->error = -EILSEQ;
3354 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3355 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3356 != MMC_BUS_TEST_R)
3357 host->data->error = -EILSEQ;
3358 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3359 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3360 intmask);
3361 sdhci_adma_show_error(host);
3362 host->data->error = -EIO;
3363 if (host->ops->adma_workaround)
3364 host->ops->adma_workaround(host, intmask);
3365 }
3366
3367 if (host->data->error)
3368 sdhci_finish_data(host);
3369 else {
3370 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3371 sdhci_transfer_pio(host);
3372
3373 /*
3374 * We currently don't do anything fancy with DMA
3375 * boundaries, but as we can't disable the feature
3376 * we need to at least restart the transfer.
3377 *
3378 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3379 * should return a valid address to continue from, but as
3380 * some controllers are faulty, don't trust them.
3381 */
3382 if (intmask & SDHCI_INT_DMA_END) {
3383 dma_addr_t dmastart, dmanow;
3384
3385 dmastart = sdhci_sdma_address(host);
3386 dmanow = dmastart + host->data->bytes_xfered;
3387 /*
3388 * Force update to the next DMA block boundary.
3389 */
3390 dmanow = (dmanow &
3391 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3392 SDHCI_DEFAULT_BOUNDARY_SIZE;
3393 host->data->bytes_xfered = dmanow - dmastart;
3394 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3395 &dmastart, host->data->bytes_xfered, &dmanow);
3396 sdhci_set_sdma_addr(host, dmanow);
3397 }
3398
3399 if (intmask & SDHCI_INT_DATA_END) {
3400 if (host->cmd == host->data_cmd) {
3401 /*
3402 * Data managed to finish before the
3403 * command completed. Make sure we do
3404 * things in the proper order.
3405 */
3406 host->data_early = 1;
3407 } else {
3408 sdhci_finish_data(host);
3409 }
3410 }
3411 }
3412 }
3413
3414 static inline bool sdhci_defer_done(struct sdhci_host *host,
3415 struct mmc_request *mrq)
3416 {
3417 struct mmc_data *data = mrq->data;
3418
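/*
 * Completion is deferred to the IRQ thread when a reset is pending,
 * when the host always defers, or when the request still holds DMA
 * mappings that sdhci_request_done() must tear down.
 */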
3419 return host->pending_reset || host->always_defer_done ||
3420 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3421 data->host_cookie == COOKIE_MAPPED);
3422 }
3423
3424 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3425 {
3426 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3427 irqreturn_t result = IRQ_NONE;
3428 struct sdhci_host *host = dev_id;
3429 u32 intmask, mask, unexpected = 0;
3430 int max_loops = 16;
3431 int i;
3432
3433 spin_lock(&host->lock);
3434
3435 if (host->runtime_suspended) {
3436 spin_unlock(&host->lock);
3437 return IRQ_NONE;
3438 }
3439
3440 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3441 if (!intmask || intmask == 0xffffffff) {
3442 result = IRQ_NONE;
3443 goto out;
3444 }
3445
3446 do {
3447 DBG("IRQ status 0x%08x\n", intmask);
3448
3449 if (host->ops->irq) {
3450 intmask = host->ops->irq(host, intmask);
3451 if (!intmask)
3452 goto cont;
3453 }
3454
3455 /* Clear selected interrupts. */
3456 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3457 SDHCI_INT_BUS_POWER);
3458 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3459
3460 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3461 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3462 SDHCI_CARD_PRESENT;
3463
3464 /*
3465 * There is an observation on i.MX eSDHC: the INSERT
3466 * bit is set again immediately after it gets
3467 * cleared, if a card is inserted. We have to mask
3468 * the irq to prevent an interrupt storm which would
3469 * freeze the system. The REMOVE bit behaves in the
3470 * same way.
3471 *
3472 * More testing is needed here to ensure this works
3473 * for other platforms too.
3474 */
3475 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3476 SDHCI_INT_CARD_REMOVE);
3477 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3478 SDHCI_INT_CARD_INSERT;
3479 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3480 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3481
3482 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3483 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3484
3485 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3486 SDHCI_INT_CARD_REMOVE);
3487 result = IRQ_WAKE_THREAD;
3488 }
3489
3490 if (intmask & SDHCI_INT_CMD_MASK)
3491 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3492
3493 if (intmask & SDHCI_INT_DATA_MASK)
3494 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3495
3496 if (intmask & SDHCI_INT_BUS_POWER)
3497 pr_err("%s: Card is consuming too much power!\n",
3498 mmc_hostname(host->mmc));
3499
3500 if (intmask & SDHCI_INT_RETUNE)
3501 mmc_retune_needed(host->mmc);
3502
3503 if ((intmask & SDHCI_INT_CARD_INT) &&
3504 (host->ier & SDHCI_INT_CARD_INT)) {
3505 sdhci_enable_sdio_irq_nolock(host, false);
3506 sdio_signal_irq(host->mmc);
3507 }
3508
3509 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3510 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3511 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3512 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3513
3514 if (intmask) {
3515 unexpected |= intmask;
3516 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3517 }
3518 cont:
3519 if (result == IRQ_NONE)
3520 result = IRQ_HANDLED;
3521
3522 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3523 } while (intmask && --max_loops);
3524
3525 /* Determine if mrqs can be completed immediately */
3526 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3527 struct mmc_request *mrq = host->mrqs_done[i];
3528
3529 if (!mrq)
3530 continue;
3531
3532 if (sdhci_defer_done(host, mrq)) {
3533 result = IRQ_WAKE_THREAD;
3534 } else {
3535 mrqs_done[i] = mrq;
3536 host->mrqs_done[i] = NULL;
3537 }
3538 }
3539 out:
3540 if (host->deferred_cmd)
3541 result = IRQ_WAKE_THREAD;
3542
3543 spin_unlock(&host->lock);
3544
3545 /* Process mrqs ready for immediate completion */
3546 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3547 if (!mrqs_done[i])
3548 continue;
3549
3550 if (host->ops->request_done)
3551 host->ops->request_done(host, mrqs_done[i]);
3552 else
3553 mmc_request_done(host->mmc, mrqs_done[i]);
3554 }
3555
3556 if (unexpected) {
3557 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3558 mmc_hostname(host->mmc), unexpected);
3559 sdhci_dumpregs(host);
3560 }
3561
3562 return result;
3563 }
3564
3565 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3566 {
3567 struct sdhci_host *host = dev_id;
3568 struct mmc_command *cmd;
3569 unsigned long flags;
3570 u32 isr;
3571
3572 while (!sdhci_request_done(host))
3573 ;
3574
3575 spin_lock_irqsave(&host->lock, flags);
3576
3577 isr = host->thread_isr;
3578 host->thread_isr = 0;
3579
3580 cmd = host->deferred_cmd;
3581 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3582 sdhci_finish_mrq(host, cmd->mrq);
3583
3584 spin_unlock_irqrestore(&host->lock, flags);
3585
3586 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3587 struct mmc_host *mmc = host->mmc;
3588
3589 mmc->ops->card_event(mmc);
3590 mmc_detect_change(mmc, msecs_to_jiffies(200));
3591 }
3592
3593 return IRQ_HANDLED;
3594 }
3595
3596 /*****************************************************************************\
3597 * *
3598 * Suspend/resume *
3599 * *
3600 \*****************************************************************************/
3601
3602 #ifdef CONFIG_PM
3603
3604 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3605 {
3606 return mmc_card_is_removable(host->mmc) &&
3607 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3608 !mmc_can_gpio_cd(host->mmc);
3609 }
3610
3611 /*
3612 * To enable wakeup events, the corresponding events have to be enabled in
3613 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3614 * Table' in the SD Host Controller Standard Specification.
3615 * It is useless to restore SDHCI_INT_ENABLE state in
3616 * sdhci_disable_irq_wakeups() since it will be set by
3617 * sdhci_enable_card_detection() or sdhci_init().
3618 */
3619 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3620 {
3621 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3622 SDHCI_WAKE_ON_INT;
3623 u32 irq_val = 0;
3624 u8 wake_val = 0;
3625 u8 val;
3626
3627 if (sdhci_cd_irq_can_wakeup(host)) {
3628 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3629 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3630 }
3631
3632 if (mmc_card_wake_sdio_irq(host->mmc)) {
3633 wake_val |= SDHCI_WAKE_ON_INT;
3634 irq_val |= SDHCI_INT_CARD_INT;
3635 }
3636
3637 if (!irq_val)
3638 return false;
3639
3640 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3641 val &= ~mask;
3642 val |= wake_val;
3643 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3644
3645 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3646
3647 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3648
3649 return host->irq_wake_enabled;
3650 }
3651
3652 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3653 {
3654 u8 val;
3655 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3656 | SDHCI_WAKE_ON_INT;
3657
3658 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3659 val &= ~mask;
3660 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3661
3662 disable_irq_wake(host->irq);
3663
3664 host->irq_wake_enabled = false;
3665 }
3666
3667 int sdhci_suspend_host(struct sdhci_host *host)
3668 {
3669 sdhci_disable_card_detection(host);
3670
3671 mmc_retune_timer_stop(host->mmc);
3672
3673 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3674 !sdhci_enable_irq_wakeups(host)) {
3675 host->ier = 0;
3676 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3677 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3678 free_irq(host->irq, host);
3679 }
3680
3681 return 0;
3682 }
3683
3684 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3685
3686 int sdhci_resume_host(struct sdhci_host *host)
3687 {
3688 struct mmc_host *mmc = host->mmc;
3689 int ret = 0;
3690
3691 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3692 if (host->ops->enable_dma)
3693 host->ops->enable_dma(host);
3694 }
3695
3696 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3697 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3698 /* Card keeps power but host controller does not */
3699 sdhci_init(host, 0);
3700 host->pwr = 0;
3701 host->clock = 0;
3702 mmc->ops->set_ios(mmc, &mmc->ios);
3703 } else {
3704 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3705 }
3706
3707 if (host->irq_wake_enabled) {
3708 sdhci_disable_irq_wakeups(host);
3709 } else {
3710 ret = request_threaded_irq(host->irq, sdhci_irq,
3711 sdhci_thread_irq, IRQF_SHARED,
3712 mmc_hostname(mmc), host);
3713 if (ret)
3714 return ret;
3715 }
3716
3717 sdhci_enable_card_detection(host);
3718
3719 return ret;
3720 }
3721
3722 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3723
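/*
 * Runtime suspend: mask every interrupt source except the SDIO card
 * interrupt, wait for any interrupt handler already in flight, then mark
 * the host as runtime suspended.
 */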
3724 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3725 {
3726 unsigned long flags;
3727
3728 mmc_retune_timer_stop(host->mmc);
3729
3730 spin_lock_irqsave(&host->lock, flags);
3731 host->ier &= SDHCI_INT_CARD_INT;
3732 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3733 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3734 spin_unlock_irqrestore(&host->lock, flags);
3735
3736 synchronize_hardirq(host->irq);
3737
3738 spin_lock_irqsave(&host->lock, flags);
3739 host->runtime_suspended = true;
3740 spin_unlock_irqrestore(&host->lock, flags);
3741
3742 return 0;
3743 }
3744 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3745
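/*
 * Runtime resume: re-initialise the controller and, if the card was left
 * powered, force the signal voltage, clock and power to be re-programmed
 * before SDIO interrupts and card detection are re-enabled.
 */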
3746 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3747 {
3748 struct mmc_host *mmc = host->mmc;
3749 unsigned long flags;
3750 int host_flags = host->flags;
3751
3752 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3753 if (host->ops->enable_dma)
3754 host->ops->enable_dma(host);
3755 }
3756
3757 sdhci_init(host, soft_reset);
3758
3759 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3760 mmc->ios.power_mode != MMC_POWER_OFF) {
3761 /* Force clock and power re-program */
3762 host->pwr = 0;
3763 host->clock = 0;
3764 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3765 mmc->ops->set_ios(mmc, &mmc->ios);
3766
3767 if ((host_flags & SDHCI_PV_ENABLED) &&
3768 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3769 spin_lock_irqsave(&host->lock, flags);
3770 sdhci_enable_preset_value(host, true);
3771 spin_unlock_irqrestore(&host->lock, flags);
3772 }
3773
3774 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3775 mmc->ops->hs400_enhanced_strobe)
3776 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3777 }
3778
3779 spin_lock_irqsave(&host->lock, flags);
3780
3781 host->runtime_suspended = false;
3782
3783 /* Enable SDIO IRQ */
3784 if (sdio_irq_claimed(mmc))
3785 sdhci_enable_sdio_irq_nolock(host, true);
3786
3787 /* Enable Card Detection */
3788 sdhci_enable_card_detection(host);
3789
3790 spin_unlock_irqrestore(&host->lock, flags);
3791
3792 return 0;
3793 }
3794 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3795
3796 #endif /* CONFIG_PM */
3797
3798 /*****************************************************************************\
3799 * *
3800 * Command Queue Engine (CQE) helpers *
3801 * *
3802 \*****************************************************************************/
3803
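/*
 * Switch the controller into CQE operation: select the appropriate ADMA
 * descriptor format, program a 512-byte block size, set the maximum data
 * timeout and replace the interrupt mask with the CQE set.
 */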
3804 void sdhci_cqe_enable(struct mmc_host *mmc)
3805 {
3806 struct sdhci_host *host = mmc_priv(mmc);
3807 unsigned long flags;
3808 u8 ctrl;
3809
3810 spin_lock_irqsave(&host->lock, flags);
3811
3812 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3813 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3814 /*
3815  * Hosts from v4.10 support the ADMA3 DMA type.
3816  * ADMA3 uses integrated descriptors that fetch both the command
3817  * and transfer descriptors, which is better suited to command queuing.
3818 */
3819 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3820 ctrl |= SDHCI_CTRL_ADMA3;
3821 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3822 ctrl |= SDHCI_CTRL_ADMA64;
3823 else
3824 ctrl |= SDHCI_CTRL_ADMA32;
3825 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3826
3827 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3828 SDHCI_BLOCK_SIZE);
3829
3830 /* Set maximum timeout */
3831 sdhci_set_timeout(host, NULL);
3832
3833 host->ier = host->cqe_ier;
3834
3835 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3836 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3837
3838 host->cqe_on = true;
3839
3840 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3841 mmc_hostname(mmc), host->ier,
3842 sdhci_readl(host, SDHCI_INT_STATUS));
3843
3844 spin_unlock_irqrestore(&host->lock, flags);
3845 }
3846 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3847
3848 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3849 {
3850 struct sdhci_host *host = mmc_priv(mmc);
3851 unsigned long flags;
3852
3853 spin_lock_irqsave(&host->lock, flags);
3854
3855 sdhci_set_default_irqs(host);
3856
3857 host->cqe_on = false;
3858
3859 if (recovery) {
3860 sdhci_do_reset(host, SDHCI_RESET_CMD);
3861 sdhci_do_reset(host, SDHCI_RESET_DATA);
3862 }
3863
3864 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3865 mmc_hostname(mmc), host->ier,
3866 sdhci_readl(host, SDHCI_INT_STATUS));
3867
3868 spin_unlock_irqrestore(&host->lock, flags);
3869 }
3870 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3871
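/*
 * Called from a CQE-aware interrupt handler: translate the raw SDHCI
 * interrupt status into command/data error codes, acknowledge the handled
 * bits and warn about anything unexpected. Returns false when the host is
 * not in CQE mode so the caller can fall back to normal IRQ handling.
 */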
3872 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3873 int *data_error)
3874 {
3875 u32 mask;
3876
3877 if (!host->cqe_on)
3878 return false;
3879
3880 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3881 *cmd_error = -EILSEQ;
3882 else if (intmask & SDHCI_INT_TIMEOUT)
3883 *cmd_error = -ETIMEDOUT;
3884 else
3885 *cmd_error = 0;
3886
3887 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3888 *data_error = -EILSEQ;
3889 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3890 *data_error = -ETIMEDOUT;
3891 else if (intmask & SDHCI_INT_ADMA_ERROR)
3892 *data_error = -EIO;
3893 else
3894 *data_error = 0;
3895
3896 /* Clear selected interrupts. */
3897 mask = intmask & host->cqe_ier;
3898 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3899
3900 if (intmask & SDHCI_INT_BUS_POWER)
3901 pr_err("%s: Card is consuming too much power!\n",
3902 mmc_hostname(host->mmc));
3903
3904 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3905 if (intmask) {
3906 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3907 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3908 mmc_hostname(host->mmc), intmask);
3909 sdhci_dumpregs(host);
3910 }
3911
3912 return true;
3913 }
3914 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
3915
3916 /*****************************************************************************\
3917 * *
3918 * Device allocation/registration *
3919 * *
3920 \*****************************************************************************/
3921
3922 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3923 size_t priv_size)
3924 {
3925 struct mmc_host *mmc;
3926 struct sdhci_host *host;
3927
3928 WARN_ON(dev == NULL);
3929
3930 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3931 if (!mmc)
3932 return ERR_PTR(-ENOMEM);
3933
3934 host = mmc_priv(mmc);
3935 host->mmc = mmc;
3936 host->mmc_host_ops = sdhci_ops;
3937 mmc->ops = &host->mmc_host_ops;
3938
3939 host->flags = SDHCI_SIGNALING_330;
3940
3941 host->cqe_ier = SDHCI_CQE_INT_MASK;
3942 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3943
3944 host->tuning_delay = -1;
3945 host->tuning_loop_count = MAX_TUNING_LOOP;
3946
3947 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3948
3949 /*
3950 * The DMA table descriptor count is calculated as the maximum
3951 * number of segments times 2, to allow for an alignment
3952 * descriptor for each segment, plus 1 for a nop end descriptor.
3953 */
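/* e.g. with SDHCI_MAX_SEGS at its usual value of 128, this is 128 * 2 + 1 = 257 entries */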
3954 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3955
3956 host->max_timeout_count = 0xE;
3957
3958 return host;
3959 }
3960
3961 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
3962
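/*
 * Set the DMA mask for the host's parent device: try a 64-bit mask when the
 * controller claims 64-bit DMA support, otherwise (or if that fails) fall
 * back to the default 32-bit mask.
 */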
3963 static int sdhci_set_dma_mask(struct sdhci_host *host)
3964 {
3965 struct mmc_host *mmc = host->mmc;
3966 struct device *dev = mmc_dev(mmc);
3967 int ret = -EINVAL;
3968
3969 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3970 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3971
3972 /* Try 64-bit mask if hardware is capable of it */
3973 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3974 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3975 if (ret) {
3976 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3977 mmc_hostname(mmc));
3978 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3979 }
3980 }
3981
3982 /* 32-bit mask as default & fallback */
3983 if (ret) {
3984 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3985 if (ret)
3986 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3987 mmc_hostname(mmc));
3988 }
3989
3990 return ret;
3991 }
3992
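/*
 * Read the capability registers once, after a full controller reset (and
 * after enabling V4 mode when requested). Callers may pass explicit
 * version/caps values to override the hardware; when reading from hardware,
 * the "sdhci-caps" / "sdhci-caps-mask" device properties can set or clear
 * individual capability bits.
 */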
3993 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3994 const u32 *caps, const u32 *caps1)
3995 {
3996 u16 v;
3997 u64 dt_caps_mask = 0;
3998 u64 dt_caps = 0;
3999
4000 if (host->read_caps)
4001 return;
4002
4003 host->read_caps = true;
4004
4005 if (debug_quirks)
4006 host->quirks = debug_quirks;
4007
4008 if (debug_quirks2)
4009 host->quirks2 = debug_quirks2;
4010
4011 sdhci_do_reset(host, SDHCI_RESET_ALL);
4012
4013 if (host->v4_mode)
4014 sdhci_do_enable_v4_mode(host);
4015
4016 device_property_read_u64(mmc_dev(host->mmc),
4017 "sdhci-caps-mask", &dt_caps_mask);
4018 device_property_read_u64(mmc_dev(host->mmc),
4019 "sdhci-caps", &dt_caps);
4020
4021 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4022 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4023
4024 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4025 return;
4026
4027 if (caps) {
4028 host->caps = *caps;
4029 } else {
4030 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4031 host->caps &= ~lower_32_bits(dt_caps_mask);
4032 host->caps |= lower_32_bits(dt_caps);
4033 }
4034
4035 if (host->version < SDHCI_SPEC_300)
4036 return;
4037
4038 if (caps1) {
4039 host->caps1 = *caps1;
4040 } else {
4041 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4042 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4043 host->caps1 |= upper_32_bits(dt_caps);
4044 }
4045 }
4046 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4047
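/*
 * For hosts limited to a single segment, allocate a bounce buffer (capped at
 * 64K) so that scattered requests can be coalesced into one DMA transfer.
 * Allocation failures are not fatal; the host simply stays at max_segs == 1
 * without bouncing.
 */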
4048 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4049 {
4050 struct mmc_host *mmc = host->mmc;
4051 unsigned int max_blocks;
4052 unsigned int bounce_size;
4053 int ret;
4054
4055 /*
4056 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4057  * has diminishing returns; this is probably because SD/MMC
4058  * cards are usually optimized to handle requests of this size.
4059 */
4060 bounce_size = SZ_64K;
4061 /*
4062  * Shrink the bounce buffer to the maximum request size if that is
4063  * smaller; further down, the maximum request size is in turn capped
4064  * to the bounce buffer size.
4065 */
4066 if (mmc->max_req_size < bounce_size)
4067 bounce_size = mmc->max_req_size;
4068 max_blocks = bounce_size / 512;
4069
4070 /*
4071  * When only a single segment is supported, a bounce buffer that
4072  * groups scattered reads/writes together can give a significant
4073  * speedup.
4074 */
4075 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4076 bounce_size,
4077 GFP_KERNEL);
4078 if (!host->bounce_buffer) {
4079 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4080 mmc_hostname(mmc),
4081 bounce_size);
4082 /*
4083  * Returning early here makes sure we proceed with
4084 * mmc->max_segs == 1.
4085 */
4086 return;
4087 }
4088
4089 host->bounce_addr = dma_map_single(mmc_dev(mmc),
4090 host->bounce_buffer,
4091 bounce_size,
4092 DMA_BIDIRECTIONAL);
4093 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4094 if (ret) {
4095 devm_kfree(mmc_dev(mmc), host->bounce_buffer);
4096 host->bounce_buffer = NULL;
4097 /* Again fall back to max_segs == 1 */
4098 return;
4099 }
4100
4101 host->bounce_buffer_size = bounce_size;
4102
4103 /* Lie about this since we're bouncing */
4104 mmc->max_segs = max_blocks;
4105 mmc->max_seg_size = bounce_size;
4106 mmc->max_req_size = bounce_size;
4107
4108 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4109 mmc_hostname(mmc), max_blocks, bounce_size);
4110 }
4111
4112 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4113 {
4114 /*
4115 * According to SD Host Controller spec v4.10, bit[27] added from
4116 * version 4.10 in Capabilities Register is used as 64-bit System
4117 * Address support for V4 mode.
4118 */
4119 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4120 return host->caps & SDHCI_CAN_64BIT_V4;
4121
4122 return host->caps & SDHCI_CAN_64BIT;
4123 }
4124
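/*
 * Probe the controller's capabilities and translate them into mmc_host
 * parameters: DMA mode, clocks, timeouts, bus widths, UHS modes, OCR mask
 * and transfer limits. Resources claimed here are released by
 * sdhci_cleanup_host().
 */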
4125 int sdhci_setup_host(struct sdhci_host *host)
4126 {
4127 struct mmc_host *mmc;
4128 u32 max_current_caps;
4129 unsigned int ocr_avail;
4130 unsigned int override_timeout_clk;
4131 u32 max_clk;
4132 int ret = 0;
4133 bool enable_vqmmc = false;
4134
4135 WARN_ON(host == NULL);
4136 if (host == NULL)
4137 return -EINVAL;
4138
4139 mmc = host->mmc;
4140
4141 /*
4142 * If there are external regulators, get them. Note this must be done
4143 * early before resetting the host and reading the capabilities so that
4144 * the host can take the appropriate action if regulators are not
4145 * available.
4146 */
4147 if (!mmc->supply.vqmmc) {
4148 ret = mmc_regulator_get_supply(mmc);
4149 if (ret)
4150 return ret;
4151 enable_vqmmc = true;
4152 }
4153
4154 DBG("Version: 0x%08x | Present: 0x%08x\n",
4155 sdhci_readw(host, SDHCI_HOST_VERSION),
4156 sdhci_readl(host, SDHCI_PRESENT_STATE));
4157 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4158 sdhci_readl(host, SDHCI_CAPABILITIES),
4159 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4160
4161 sdhci_read_caps(host);
4162
4163 override_timeout_clk = host->timeout_clk;
4164
4165 if (host->version > SDHCI_SPEC_420) {
4166 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4167 mmc_hostname(mmc), host->version);
4168 }
4169
4170 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4171 host->flags |= SDHCI_USE_SDMA;
4172 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4173 DBG("Controller doesn't have SDMA capability\n");
4174 else
4175 host->flags |= SDHCI_USE_SDMA;
4176
4177 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4178 (host->flags & SDHCI_USE_SDMA)) {
4179 DBG("Disabling DMA as it is marked broken\n");
4180 host->flags &= ~SDHCI_USE_SDMA;
4181 }
4182
4183 if ((host->version >= SDHCI_SPEC_200) &&
4184 (host->caps & SDHCI_CAN_DO_ADMA2))
4185 host->flags |= SDHCI_USE_ADMA;
4186
4187 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4188 (host->flags & SDHCI_USE_ADMA)) {
4189 DBG("Disabling ADMA as it is marked broken\n");
4190 host->flags &= ~SDHCI_USE_ADMA;
4191 }
4192
4193 if (sdhci_can_64bit_dma(host))
4194 host->flags |= SDHCI_USE_64_BIT_DMA;
4195
4196 if (host->use_external_dma) {
4197 ret = sdhci_external_dma_init(host);
4198 if (ret == -EPROBE_DEFER)
4199 goto unreg;
4200 /*
4201 * Fall back to use the DMA/PIO integrated in standard SDHCI
4202 * instead of external DMA devices.
4203 */
4204 else if (ret)
4205 sdhci_switch_external_dma(host, false);
4206 /* Disable internal DMA sources */
4207 else
4208 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4209 }
4210
4211 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4212 if (host->ops->set_dma_mask)
4213 ret = host->ops->set_dma_mask(host);
4214 else
4215 ret = sdhci_set_dma_mask(host);
4216
4217 if (!ret && host->ops->enable_dma)
4218 ret = host->ops->enable_dma(host);
4219
4220 if (ret) {
4221 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4222 mmc_hostname(mmc));
4223 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4224
4225 ret = 0;
4226 }
4227 }
4228
4229 /* SDMA does not support 64-bit DMA unless v4 mode is set */
4230 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4231 host->flags &= ~SDHCI_USE_SDMA;
4232
4233 if (host->flags & SDHCI_USE_ADMA) {
4234 dma_addr_t dma;
4235 void *buf;
4236
4237 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4238 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4239 else if (!host->alloc_desc_sz)
4240 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4241
4242 host->desc_sz = host->alloc_desc_sz;
4243 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4244
4245 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4246 /*
4247 * Use zalloc to zero the reserved high 32-bits of 128-bit
4248 * descriptors so that they never need to be written.
4249 */
4250 buf = dma_alloc_coherent(mmc_dev(mmc),
4251 host->align_buffer_sz + host->adma_table_sz,
4252 &dma, GFP_KERNEL);
4253 if (!buf) {
4254 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4255 mmc_hostname(mmc));
4256 host->flags &= ~SDHCI_USE_ADMA;
4257 } else if ((dma + host->align_buffer_sz) &
4258 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4259 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4260 mmc_hostname(mmc));
4261 host->flags &= ~SDHCI_USE_ADMA;
4262 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4263 host->adma_table_sz, buf, dma);
4264 } else {
4265 host->align_buffer = buf;
4266 host->align_addr = dma;
4267
4268 host->adma_table = buf + host->align_buffer_sz;
4269 host->adma_addr = dma + host->align_buffer_sz;
4270 }
4271 }
4272
4273 /*
4274 * If we use DMA, then it's up to the caller to set the DMA
4275 * mask, but PIO does not need the hw shim so we set a new
4276 * mask here in that case.
4277 */
4278 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4279 host->dma_mask = DMA_BIT_MASK(64);
4280 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4281 }
4282
4283 if (host->version >= SDHCI_SPEC_300)
4284 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4285 else
4286 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4287
4288 host->max_clk *= 1000000;
4289 if (host->max_clk == 0 || host->quirks &
4290 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4291 if (!host->ops->get_max_clock) {
4292 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4293 mmc_hostname(mmc));
4294 ret = -ENODEV;
4295 goto undma;
4296 }
4297 host->max_clk = host->ops->get_max_clock(host);
4298 }
4299
4300 /*
4301 * In case of Host Controller v3.00, find out whether clock
4302 * multiplier is supported.
4303 */
4304 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4305
4306 /*
4307 * In case the value in Clock Multiplier is 0, then programmable
4308 * clock mode is not supported, otherwise the actual clock
4309 * multiplier is one more than the value of Clock Multiplier
4310 * in the Capabilities Register.
4311 */
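/* e.g. a Clock Multiplier field value of 9 means the clock is multiplied by 10 */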
4312 if (host->clk_mul)
4313 host->clk_mul += 1;
4314
4315 /*
4316 * Set host parameters.
4317 */
4318 max_clk = host->max_clk;
4319
4320 if (host->ops->get_min_clock)
4321 mmc->f_min = host->ops->get_min_clock(host);
4322 else if (host->version >= SDHCI_SPEC_300) {
4323 if (host->clk_mul)
4324 max_clk = host->max_clk * host->clk_mul;
4325 /*
4326 * Divided Clock Mode minimum clock rate is always less than
4327 * Programmable Clock Mode minimum clock rate.
4328 */
4329 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4330 } else
4331 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4332
4333 if (!mmc->f_max || mmc->f_max > max_clk)
4334 mmc->f_max = max_clk;
4335
4336 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4337 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4338
4339 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4340 host->timeout_clk *= 1000;
4341
4342 if (host->timeout_clk == 0) {
4343 if (!host->ops->get_timeout_clock) {
4344 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4345 mmc_hostname(mmc));
4346 ret = -ENODEV;
4347 goto undma;
4348 }
4349
4350 host->timeout_clk =
4351 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4352 1000);
4353 }
4354
4355 if (override_timeout_clk)
4356 host->timeout_clk = override_timeout_clk;
4357
4358 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4359 host->ops->get_max_timeout_count(host) : 1 << 27;
4360 mmc->max_busy_timeout /= host->timeout_clk;
4361 }
4362
4363 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4364 !host->ops->get_max_timeout_count)
4365 mmc->max_busy_timeout = 0;
4366
4367 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4368 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4369
4370 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4371 host->flags |= SDHCI_AUTO_CMD12;
4372
4373 /*
4374 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4375 * For v4 mode, SDMA may use Auto-CMD23 as well.
4376 */
4377 if ((host->version >= SDHCI_SPEC_300) &&
4378 ((host->flags & SDHCI_USE_ADMA) ||
4379 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4380 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4381 host->flags |= SDHCI_AUTO_CMD23;
4382 DBG("Auto-CMD23 available\n");
4383 } else {
4384 DBG("Auto-CMD23 unavailable\n");
4385 }
4386
4387 /*
4388 * A controller may support 8-bit width, but the board itself
4389 * might not have the pins brought out. Boards that support
4390 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4391 * their platform code before calling sdhci_add_host(), and we
4392 * won't assume 8-bit width for hosts without that CAP.
4393 */
4394 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4395 mmc->caps |= MMC_CAP_4_BIT_DATA;
4396
4397 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4398 mmc->caps &= ~MMC_CAP_CMD23;
4399
4400 if (host->caps & SDHCI_CAN_DO_HISPD)
4401 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4402
4403 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4404 mmc_card_is_removable(mmc) &&
4405 mmc_gpio_get_cd(mmc) < 0)
4406 mmc->caps |= MMC_CAP_NEEDS_POLL;
4407
4408 if (!IS_ERR(mmc->supply.vqmmc)) {
4409 if (enable_vqmmc) {
4410 ret = regulator_enable(mmc->supply.vqmmc);
4411 host->sdhci_core_to_disable_vqmmc = !ret;
4412 }
4413
4414 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4415 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4416 1950000))
4417 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4418 SDHCI_SUPPORT_SDR50 |
4419 SDHCI_SUPPORT_DDR50);
4420
4421 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4422 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4423 3600000))
4424 host->flags &= ~SDHCI_SIGNALING_330;
4425
4426 if (ret) {
4427 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4428 mmc_hostname(mmc), ret);
4429 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4430 }
4431
4432 }
4433
4434 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4435 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4436 SDHCI_SUPPORT_DDR50);
4437 /*
4438 * The SDHCI controller in a SoC might support HS200/HS400
4439 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4440 * but if the board is modeled such that the IO lines are not
4441 * connected to 1.8v then HS200/HS400 cannot be supported.
4442 * Disable HS200/HS400 if the board does not have 1.8v connected
4443 * to the IO lines. (Applicable for other modes in 1.8v)
4444 */
4445 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4446 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4447 }
4448
4449 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4450 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4451 SDHCI_SUPPORT_DDR50))
4452 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4453
4454 /* SDR104 support also implies SDR50 support */
4455 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4456 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4457 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4458 * field can be promoted to support HS200.
4459 */
4460 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4461 mmc->caps2 |= MMC_CAP2_HS200;
4462 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4463 mmc->caps |= MMC_CAP_UHS_SDR50;
4464 }
4465
4466 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4467 (host->caps1 & SDHCI_SUPPORT_HS400))
4468 mmc->caps2 |= MMC_CAP2_HS400;
4469
4470 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4471 (IS_ERR(mmc->supply.vqmmc) ||
4472 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4473 1300000)))
4474 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4475
4476 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4477 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4478 mmc->caps |= MMC_CAP_UHS_DDR50;
4479
4480 /* Does the host need tuning for SDR50? */
4481 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4482 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4483
4484 /* Driver Type(s) (A, C, D) supported by the host */
4485 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4486 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4487 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4488 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4489 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4490 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4491
4492 /* Initial value for re-tuning timer count */
4493 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4494 host->caps1);
4495
4496 /*
4497 * In case Re-tuning Timer is not disabled, the actual value of
4498 * re-tuning timer will be 2 ^ (n - 1).
4499 */
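/* e.g. a raw timer count of 4 gives 1 << (4 - 1) = 8 (seconds, per the SDHCI spec) */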
4500 if (host->tuning_count)
4501 host->tuning_count = 1 << (host->tuning_count - 1);
4502
4503 /* Re-tuning mode supported by the Host Controller */
4504 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4505
4506 ocr_avail = 0;
4507
4508 /*
4509 * According to SD Host Controller spec v3.00, if the Host System
4510 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4511 * the value is meaningful only if Voltage Support in the Capabilities
4512 * register is set. The actual current value is 4 times the register
4513 * value.
4514 */
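/* e.g. a Maximum Current field of 50 corresponds to 50 * 4 = 200 mA */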
4515 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4516 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4517 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4518 if (curr > 0) {
4519
4520 /* convert to SDHCI_MAX_CURRENT format */
4521 curr = curr/1000; /* convert to mA */
4522 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4523
4524 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4525 max_current_caps =
4526 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4527 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4528 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4529 }
4530 }
4531
4532 if (host->caps & SDHCI_CAN_VDD_330) {
4533 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4534
4535 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4536 max_current_caps) *
4537 SDHCI_MAX_CURRENT_MULTIPLIER;
4538 }
4539 if (host->caps & SDHCI_CAN_VDD_300) {
4540 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4541
4542 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4543 max_current_caps) *
4544 SDHCI_MAX_CURRENT_MULTIPLIER;
4545 }
4546 if (host->caps & SDHCI_CAN_VDD_180) {
4547 ocr_avail |= MMC_VDD_165_195;
4548
4549 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4550 max_current_caps) *
4551 SDHCI_MAX_CURRENT_MULTIPLIER;
4552 }
4553
4554 /* If OCR set by host, use it instead. */
4555 if (host->ocr_mask)
4556 ocr_avail = host->ocr_mask;
4557
4558 /* If OCR set by external regulators, give it highest prio. */
4559 if (mmc->ocr_avail)
4560 ocr_avail = mmc->ocr_avail;
4561
4562 mmc->ocr_avail = ocr_avail;
4563 mmc->ocr_avail_sdio = ocr_avail;
4564 if (host->ocr_avail_sdio)
4565 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4566 mmc->ocr_avail_sd = ocr_avail;
4567 if (host->ocr_avail_sd)
4568 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4569 else /* normal SD controllers don't support 1.8V */
4570 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4571 mmc->ocr_avail_mmc = ocr_avail;
4572 if (host->ocr_avail_mmc)
4573 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4574
4575 if (mmc->ocr_avail == 0) {
4576 pr_err("%s: Hardware doesn't report any supported voltages.\n",
4577 mmc_hostname(mmc));
4578 ret = -ENODEV;
4579 goto unreg;
4580 }
4581
4582 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4583 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4584 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4585 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4586 host->flags |= SDHCI_SIGNALING_180;
4587
4588 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4589 host->flags |= SDHCI_SIGNALING_120;
4590
4591 spin_lock_init(&host->lock);
4592
4593 /*
4594 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4595 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4596 * is less anyway.
4597 */
4598 mmc->max_req_size = 524288;
4599
4600 /*
4601  * Maximum number of segments. Depends on whether the hardware
4602  * can do scatter/gather or not.
4603 */
4604 if (host->flags & SDHCI_USE_ADMA) {
4605 mmc->max_segs = SDHCI_MAX_SEGS;
4606 } else if (host->flags & SDHCI_USE_SDMA) {
4607 mmc->max_segs = 1;
4608 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4609 dma_max_mapping_size(mmc_dev(mmc)));
4610 } else { /* PIO */
4611 mmc->max_segs = SDHCI_MAX_SEGS;
4612 }
4613
4614 /*
4615 * Maximum segment size. Could be one segment with the maximum number
4616 * of bytes. When doing hardware scatter/gather, each entry cannot
4617 * be larger than 64 KiB though.
4618 */
4619 if (host->flags & SDHCI_USE_ADMA) {
4620 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4621 mmc->max_seg_size = 65535;
4622 else
4623 mmc->max_seg_size = 65536;
4624 } else {
4625 mmc->max_seg_size = mmc->max_req_size;
4626 }
4627
4628 /*
4629 * Maximum block size. This varies from controller to controller and
4630 * is specified in the capabilities register.
4631 */
4632 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4633 mmc->max_blk_size = 2;
4634 } else {
4635 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4636 SDHCI_MAX_BLOCK_SHIFT;
4637 if (mmc->max_blk_size >= 3) {
4638 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4639 mmc_hostname(mmc));
4640 mmc->max_blk_size = 0;
4641 }
4642 }
4643
4644 mmc->max_blk_size = 512 << mmc->max_blk_size;
4645
4646 /*
4647 * Maximum block count.
4648 */
4649 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4650
4651 if (mmc->max_segs == 1)
4652 /* This may alter mmc->*_blk_* parameters */
4653 sdhci_allocate_bounce_buffer(host);
4654
4655 return 0;
4656
4657 unreg:
4658 if (host->sdhci_core_to_disable_vqmmc)
4659 regulator_disable(mmc->supply.vqmmc);
4660 undma:
4661 if (host->align_buffer)
4662 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4663 host->adma_table_sz, host->align_buffer,
4664 host->align_addr);
4665 host->adma_table = NULL;
4666 host->align_buffer = NULL;
4667
4668 return ret;
4669 }
4670 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4671
4672 void sdhci_cleanup_host(struct sdhci_host *host)
4673 {
4674 struct mmc_host *mmc = host->mmc;
4675
4676 if (host->sdhci_core_to_disable_vqmmc)
4677 regulator_disable(mmc->supply.vqmmc);
4678
4679 if (host->align_buffer)
4680 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4681 host->adma_table_sz, host->align_buffer,
4682 host->align_addr);
4683
4684 if (host->use_external_dma)
4685 sdhci_external_dma_release(host);
4686
4687 host->adma_table = NULL;
4688 host->align_buffer = NULL;
4689 }
4690 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4691
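/*
 * Second stage of registration: create the completion workqueue and timers,
 * do the initial controller init, request the IRQ, register the LED control
 * and finally add the mmc host. All steps are unwound on failure.
 */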
4692 int __sdhci_add_host(struct sdhci_host *host)
4693 {
4694 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4695 struct mmc_host *mmc = host->mmc;
4696 int ret;
4697
4698 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4699 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4700 mmc->caps2 &= ~MMC_CAP2_CQE;
4701 mmc->cqe_ops = NULL;
4702 }
4703
4704 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4705 if (!host->complete_wq)
4706 return -ENOMEM;
4707
4708 INIT_WORK(&host->complete_work, sdhci_complete_work);
4709
4710 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4711 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4712
4713 init_waitqueue_head(&host->buf_ready_int);
4714
4715 sdhci_init(host, 0);
4716
4717 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4718 IRQF_SHARED, mmc_hostname(mmc), host);
4719 if (ret) {
4720 pr_err("%s: Failed to request IRQ %d: %d\n",
4721 mmc_hostname(mmc), host->irq, ret);
4722 goto unwq;
4723 }
4724
4725 ret = sdhci_led_register(host);
4726 if (ret) {
4727 pr_err("%s: Failed to register LED device: %d\n",
4728 mmc_hostname(mmc), ret);
4729 goto unirq;
4730 }
4731
4732 ret = mmc_add_host(mmc);
4733 if (ret)
4734 goto unled;
4735
4736 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4737 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4738 host->use_external_dma ? "External DMA" :
4739 (host->flags & SDHCI_USE_ADMA) ?
4740 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4741 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4742
4743 sdhci_enable_card_detection(host);
4744
4745 return 0;
4746
4747 unled:
4748 sdhci_led_unregister(host);
4749 unirq:
4750 sdhci_do_reset(host, SDHCI_RESET_ALL);
4751 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4752 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4753 free_irq(host->irq, host);
4754 unwq:
4755 destroy_workqueue(host->complete_wq);
4756
4757 return ret;
4758 }
4759 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4760
4761 int sdhci_add_host(struct sdhci_host *host)
4762 {
4763 int ret;
4764
4765 ret = sdhci_setup_host(host);
4766 if (ret)
4767 return ret;
4768
4769 ret = __sdhci_add_host(host);
4770 if (ret)
4771 goto cleanup;
4772
4773 return 0;
4774
4775 cleanup:
4776 sdhci_cleanup_host(host);
4777
4778 return ret;
4779 }
4780 EXPORT_SYMBOL_GPL(sdhci_add_host);
4781
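/*
 * Tear the host down. When "dead" is set the hardware is assumed to be
 * unreachable, so outstanding requests are errored out and the final
 * controller reset is skipped.
 */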
4782 void sdhci_remove_host(struct sdhci_host *host, int dead)
4783 {
4784 struct mmc_host *mmc = host->mmc;
4785 unsigned long flags;
4786
4787 if (dead) {
4788 spin_lock_irqsave(&host->lock, flags);
4789
4790 host->flags |= SDHCI_DEVICE_DEAD;
4791
4792 if (sdhci_has_requests(host)) {
4793 pr_err("%s: Controller removed during transfer!\n",
4794        mmc_hostname(mmc));
4795 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4796 }
4797
4798 spin_unlock_irqrestore(&host->lock, flags);
4799 }
4800
4801 sdhci_disable_card_detection(host);
4802
4803 mmc_remove_host(mmc);
4804
4805 sdhci_led_unregister(host);
4806
4807 if (!dead)
4808 sdhci_do_reset(host, SDHCI_RESET_ALL);
4809
4810 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4811 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4812 free_irq(host->irq, host);
4813
4814 del_timer_sync(&host->timer);
4815 del_timer_sync(&host->data_timer);
4816
4817 destroy_workqueue(host->complete_wq);
4818
4819 if (host->sdhci_core_to_disable_vqmmc)
4820 regulator_disable(mmc->supply.vqmmc);
4821
4822 if (host->align_buffer)
4823 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4824 host->adma_table_sz, host->align_buffer,
4825 host->align_addr);
4826
4827 if (host->use_external_dma)
4828 sdhci_external_dma_release(host);
4829
4830 host->adma_table = NULL;
4831 host->align_buffer = NULL;
4832 }
4833
4834 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4835
4836 void sdhci_free_host(struct sdhci_host *host)
4837 {
4838 mmc_free_host(host->mmc);
4839 }
4840
4841 EXPORT_SYMBOL_GPL(sdhci_free_host);
4842
4843 /*****************************************************************************\
4844 * *
4845 * Driver init/exit *
4846 * *
4847 \*****************************************************************************/
4848
4849 static int __init sdhci_drv_init(void)
4850 {
4851 pr_info(DRIVER_NAME
4852 ": Secure Digital Host Controller Interface driver\n");
4853 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4854
4855 return 0;
4856 }
4857
4858 static void __exit sdhci_drv_exit(void)
4859 {
4860 }
4861
4862 module_init(sdhci_drv_init);
4863 module_exit(sdhci_drv_exit);
4864
4865 module_param(debug_quirks, uint, 0444);
4866 module_param(debug_quirks2, uint, 0444);
4867
4868 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4869 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4870 MODULE_LICENSE("GPL");
4871
4872 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4873 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4874