/*
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
 */

#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/swiotlb.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_finish_data(struct sdhci_host *);

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("AC12 err:  0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_ACMD12_ERR),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(host->mmc->parent);
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_set_default_irqs(host);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 uninitialized_var(scratch);
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;
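	/*
	 * Data arrives through the 32-bit SDHCI_BUFFER register; 'chunk'
	 * tracks how many bytes of the most recently read word are still
	 * waiting to be copied out.
	 */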

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
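	 * (COOKIE_PRE_MAPPED means the mmc core's ->pre_req() hook mapped
	 * the buffers ahead of time; COOKIE_MAPPED marks mappings created
	 * on demand in this function.)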
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
				  dma_addr_t addr, int len, unsigned cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
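	/*
	 * Layout sketch of the descriptor filled in below: cmd (le16),
	 * len (le16), addr_lo (le32), then addr_hi (le32) in the 64-bit
	 * variant only.
	 */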
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32((u32)addr);

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
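		 *
		 * For example, with SDHCI_ADMA2_ALIGN = 4, addr = 0x80001002
		 * gives offset = (4 - 2) & 3 = 2, so those two bytes are
		 * staged in the align buffer and get a descriptor of their
		 * own.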
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			sdhci_adma_write_desc(host, desc, align_addr, offset,
					      ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			desc += host->desc_sz;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		if (len) {
			/* tran, valid */
			sdhci_adma_write_desc(host, desc, addr, len,
					      ADMA2_TRAN_VALID);
			desc += host->desc_sz;
		}

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static u32 sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
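			 * For example, 1024 cycles at 50 MHz gives
			 * 1000000 * 1024 / 50000000 = 20.48, rounded up
			 * to 21 us.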
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
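		/*
		 * For example, blksz = 512 on a 4-bit bus at 50 MHz is
		 * 1024 clocks of data, roughly 20.5 us, doubled to about
		 * 41 us of allowance per block.
		 */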
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data = cmd->data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
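	/*
	 * Illustration, assuming timeout_clk is in kHz as set up at probe
	 * time: with timeout_clk = 50000, the minimum timeout below is
	 * 2^13 * 1000 / 50000 = 163 us, and each increment of 'count'
	 * doubles it; count 0xE corresponds to 2^27 clock cycles.
	 */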
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 count;

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
			sdhci_calc_sw_timeout(host, cmd);
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	u8 ctrl;
	struct mmc_data *data = cmd->data;

	host->data_timeout = 0;

	if (sdhci_data_line_cmd(cmd))
		sdhci_set_timeout(host, cmd);

	if (!data)
		return;

	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);

			sdhci_writel(host, host->adma_addr, SDHCI_ADMA_ADDRESS);
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				sdhci_writel(host,
					     (u64)host->adma_addr >> 32,
					     SDHCI_ADMA_ADDRESS_HI);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_writel(host, sdhci_sdma_address(host),
				     SDHCI_DMA_ADDRESS);
		}
	}

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	if (host->version >= SDHCI_SPEC_200) {
		ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
		ctrl &= ~SDHCI_CTRL_DMA_MASK;
		if ((host->flags & SDHCI_REQ_USE_DMA) &&
			(host->flags & SDHCI_USE_ADMA)) {
			if (host->flags & SDHCI_USE_64_BIT_DMA)
				ctrl |= SDHCI_CTRL_ADMA64;
			else
				ctrl |= SDHCI_CTRL_ADMA32;
		} else {
			ctrl |= SDHCI_CTRL_SDMA;
		}
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	/* Set the DMA boundary value and block size */
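	/*
	 * For SDMA, the boundary bits of the block size register select
	 * how much data the controller transfers before raising
	 * SDHCI_INT_DMA_END; host->sdma_boundary is typically
	 * SDHCI_DEFAULT_BOUNDARY_ARG (512 KiB) unless a driver overrides it.
	 */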
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
}

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
	struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		/*
		 * If we are sending CMD23, CMD12 never gets sent
		 * on successful completion (so no Auto-CMD12).
		 */
		if (sdhci_auto_cmd12(host, cmd->mrq) &&
		    (cmd->opcode != SD_IO_RW_EXTENDED))
			mode |= SDHCI_TRNS_AUTO_CMD12;
		else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mode |= SDHCI_TRNS_AUTO_CMD23;
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
		}
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
				(mrq->data->stop && mrq->data->stop->error))) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    (data->error ||
	     !data->mrq->sbc)) {

		/*
		 * The controller needs a reset of internal state machines
		 * upon error conditions.
		 */
		if (data->error) {
			if (!host->cmd || host->cmd == data_cmd)
				sdhci_do_reset(host, SDHCI_RESET_CMD);
			sdhci_do_reset(host, SDHCI_RESET_DATA);
		}

		/*
		 * 'cap_cmd_during_tfr' request must not use the command line
		 * after mmc_command_done() has been called. It is upper layer's
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	/* Wait max 10 ms */
	timeout = 10;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/* We shouldn't wait for data inhibit for stop commands, even
	   though they might use busy signaling */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (timeout == 0) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			sdhci_finish_mrq(host, cmd->mrq);
			return;
		}
		timeout--;
		mdelay(1);
	}

	host->cmd = cmd;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
	}

	sdhci_prepare_data(host, cmd);

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		pr_err("%s: Unsupported response type!\n",
			mmc_hostname(host->mmc));
		cmd->error = -EINVAL;
		sdhci_finish_mrq(host, cmd->mrq);
		return;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
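	/*
	 * For example, raw words 0x00112233 0x44556677 0x8899aabb
	 * 0xccddeeff become 0x11223344 0x55667788 0x99aabbcc 0xddeeff00.
	 */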
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 *       feature so there might be some problems with older
	 *       controllers.
	 */
	if (cmd->flags & MMC_RSP_BUSY) {
		if (cmd->data) {
			DBG("Cannot wait for busy signal when also doing a data transfer");
		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
			   cmd == host->data_cmd) {
			/* Command complete before busy is ended */
			return;
		}
	}

	/* Finished CMD23, now send actual command. */
	if (cmd == cmd->mrq->sbc) {
		sdhci_send_command(host, cmd->mrq->cmd);
	} else {

		/* Processed actual command. */
		if (host->data && host->data_early)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
	}
}

static u16 sdhci_get_preset_value(struct sdhci_host *host)
{
	u16 preset = 0;

	switch (host->timing) {
	case MMC_TIMING_UHS_SDR12:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	case MMC_TIMING_UHS_SDR25:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
		break;
	case MMC_TIMING_UHS_SDR50:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
		break;
	case MMC_TIMING_UHS_SDR104:
	case MMC_TIMING_MMC_HS200:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
		break;
	case MMC_TIMING_MMC_HS400:
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
		break;
	default:
		pr_warn("%s: Invalid UHS-I mode selected\n",
			mmc_hostname(host->mmc));
		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
		break;
	}
	return preset;
}

u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
		   unsigned int *actual_clock)
{
	int div = 0; /* Initialized for compiler warning */
	int real_div = div, clk_mul = 1;
	u16 clk = 0;
	bool switch_base_clk = false;

	if (host->version >= SDHCI_SPEC_300) {
		if (host->preset_enabled) {
			u16 pre_val;

			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
			pre_val = sdhci_get_preset_value(host);
			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
			if (host->clk_mul &&
				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div + 1;
				clk_mul = host->clk_mul;
			} else {
				real_div = max_t(int, 1, div << 1);
			}
			goto clock_set;
		}

		/*
		 * Check if the Host Controller supports Programmable Clock
		 * Mode.
		 */
		if (host->clk_mul) {
			for (div = 1; div <= 1024; div++) {
				if ((host->max_clk * host->clk_mul / div)
					<= clock)
					break;
			}
			if ((host->max_clk * host->clk_mul / div) <= clock) {
				/*
				 * Set Programmable Clock Mode in the Clock
				 * Control register.
				 */
				clk = SDHCI_PROG_CLOCK_MODE;
				real_div = div;
				clk_mul = host->clk_mul;
				div--;
			} else {
				/*
				 * Divisor can be too small to reach clock
				 * speed requirement. Then use the base clock.
				 */
				switch_base_clk = true;
			}
		}

		if (!host->clk_mul || switch_base_clk) {
			/* Version 3.00 divisors must be a multiple of 2. */
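			/*
			 * For example, max_clk = 200 MHz and clock = 50 MHz:
			 * the loop below stops at div = 4 (kept as real_div),
			 * and div >>= 1 puts 2 in the divider field, i.e.
			 * SDCLK = 200 MHz / (2 * 2).
			 */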
			if (host->max_clk <= clock)
				div = 1;
			else {
				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
				     div += 2) {
					if ((host->max_clk / div) <= clock)
						break;
				}
			}
			real_div = div;
			div >>= 1;
			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
				&& !div && host->max_clk <= 25000000)
				div = 1;
		}
	} else {
		/* Version 2.00 divisors must be a power of 2. */
		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
			if ((host->max_clk / div) <= clock)
				break;
		}
		real_div = div;
		div >>= 1;
	}

clock_set:
	if (real_div)
		*actual_clock = (host->max_clk * clk_mul) / real_div;
	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
		<< SDHCI_DIVIDER_HI_SHIFT;

	return clk;
}
EXPORT_SYMBOL_GPL(sdhci_calc_clk);

void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
{
	ktime_t timeout;

	clk |= SDHCI_CLOCK_INT_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);

	/* Wait max 20 ms */
	timeout = ktime_add_ms(ktime_get(), 20);
	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
		& SDHCI_CLOCK_INT_STABLE)) {
		if (ktime_after(ktime_get(), timeout)) {
			pr_err("%s: Internal clock never stabilised.\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}

	clk |= SDHCI_CLOCK_CARD_EN;
	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_enable_clk);

void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	host->mmc->actual_clock = 0;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
	sdhci_enable_clk(host, clk);
}
EXPORT_SYMBOL_GPL(sdhci_set_clock);

static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
				unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;

	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);

	if (mode != MMC_POWER_OFF)
		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
	else
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
}

void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
			   unsigned short vdd)
{
	u8 pwr = 0;

	if (mode != MMC_POWER_OFF) {
		switch (1 << vdd) {
		case MMC_VDD_165_195:
		/*
		 * Without a regulator, SDHCI does not support 2.0v
		 * so we only get here if the driver deliberately
		 * added the 2.0v range to ocr_avail. Map it to 1.8v
		 * for the purpose of turning on the power.
		 */
		case MMC_VDD_20_21:
			pwr = SDHCI_POWER_180;
			break;
		case MMC_VDD_29_30:
		case MMC_VDD_30_31:
			pwr = SDHCI_POWER_300;
			break;
		case MMC_VDD_32_33:
		case MMC_VDD_33_34:
			pwr = SDHCI_POWER_330;
			break;
		default:
			WARN(1, "%s: Invalid vdd %#x\n",
			     mmc_hostname(host->mmc), vdd);
			break;
		}
	}

	if (host->pwr == pwr)
		return;

	host->pwr = pwr;

	if (pwr == 0) {
		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	} else {
		/*
		 * Spec says that we should clear the power reg before setting
		 * a new value. Some controllers don't seem to like this though.
		 */
		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);

		/*
		 * At least the Marvell CaFe chip gets confused if we set the
		 * voltage and turn on the power at the same time, so set the
		 * voltage first.
		 */
		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		pwr |= SDHCI_POWER_ON;

		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);

		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_on(host);

		/*
		 * Some controllers need an extra 10 ms delay before they
		 * can apply clock after applying power
		 */
		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
			mdelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);

void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
		     unsigned short vdd)
{
	if (IS_ERR(host->mmc->supply.vmmc))
		sdhci_set_power_noreg(host, mode, vdd);
	else
		sdhci_set_power_reg(host, mode, vdd);
}
EXPORT_SYMBOL_GPL(sdhci_set_power);

/*****************************************************************************\
 *                                                                           *
 * MMC callbacks                                                             *
 *                                                                           *
\*****************************************************************************/

static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct sdhci_host *host;
	int present;
	unsigned long flags;

	host = mmc_priv(mmc);

	/* Firstly check card presence */
	present = mmc->ops->get_cd(mmc);

	spin_lock_irqsave(&host->lock, flags);

	sdhci_led_activate(host);

	/*
	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED
	 * requests if Auto-CMD12 is enabled.
	 */
	if (sdhci_auto_cmd12(host, mrq)) {
		if (mrq->stop) {
			mrq->data->stop = NULL;
			mrq->stop = NULL;
		}
	}

	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		mrq->cmd->error = -ENOMEDIUM;
		sdhci_finish_mrq(host, mrq);
	} else {
		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
			sdhci_send_command(host, mrq->sbc);
		else
			sdhci_send_command(host, mrq->cmd);
	}

	mmiowb();
	spin_unlock_irqrestore(&host->lock, flags);
}

void sdhci_set_bus_width(struct sdhci_host *host, int width)
{
	u8 ctrl;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	if (width == MMC_BUS_WIDTH_8) {
		ctrl &= ~SDHCI_CTRL_4BITBUS;
		ctrl |= SDHCI_CTRL_8BITBUS;
	} else {
		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
	}
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}
EXPORT_SYMBOL_GPL(sdhci_set_bus_width);

void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
{
	u16 ctrl_2;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	if ((timing == MMC_TIMING_MMC_HS200) ||
	    (timing == MMC_TIMING_UHS_SDR104))
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
	else if (timing == MMC_TIMING_UHS_SDR12)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
	else if (timing == MMC_TIMING_UHS_SDR25)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
	else if (timing == MMC_TIMING_UHS_SDR50)
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
	else if ((timing == MMC_TIMING_UHS_DDR50) ||
		 (timing == MMC_TIMING_MMC_DDR52))
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
	else if (timing == MMC_TIMING_MMC_HS400)
		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);

void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u8 ctrl;

	if (ios->power_mode == MMC_POWER_UNDEFINED)
		return;

	if (host->flags & SDHCI_DEVICE_DEAD) {
		if (!IS_ERR(mmc->supply.vmmc) &&
		    ios->power_mode == MMC_POWER_OFF)
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		return;
	}

	/*
	 * Reset the chip on each power off.
	 * Should clear out any weird states.
	 */
	if (ios->power_mode == MMC_POWER_OFF) {
		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
		sdhci_reinit(host);
	}

	if (host->version >= SDHCI_SPEC_300 &&
		(ios->power_mode == MMC_POWER_UP) &&
		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
		sdhci_enable_preset_value(host, false);

	if (!ios->clock || ios->clock != host->clock) {
		host->ops->set_clock(host, ios->clock);
		host->clock = ios->clock;

		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
		    host->clock) {
			host->timeout_clk = host->mmc->actual_clock ?
						host->mmc->actual_clock / 1000 :
						host->clock / 1000;
			host->mmc->max_busy_timeout =
				host->ops->get_max_timeout_count ?
				host->ops->get_max_timeout_count(host) :
				1 << 27;
			host->mmc->max_busy_timeout /= host->timeout_clk;
		}
	}

	if (host->ops->set_power)
		host->ops->set_power(host, ios->power_mode, ios->vdd);
	else
		sdhci_set_power(host, ios->power_mode, ios->vdd);

	if (host->ops->platform_send_init_74_clocks)
		host->ops->platform_send_init_74_clocks(host, ios->power_mode);

	host->ops->set_bus_width(host, ios->bus_width);

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
		if (ios->timing == MMC_TIMING_SD_HS ||
		     ios->timing == MMC_TIMING_MMC_HS ||
		     ios->timing == MMC_TIMING_MMC_HS400 ||
		     ios->timing == MMC_TIMING_MMC_HS200 ||
		     ios->timing == MMC_TIMING_MMC_DDR52 ||
		     ios->timing == MMC_TIMING_UHS_SDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR104 ||
		     ios->timing == MMC_TIMING_UHS_DDR50 ||
		     ios->timing == MMC_TIMING_UHS_SDR25)
			ctrl |= SDHCI_CTRL_HISPD;
		else
			ctrl &= ~SDHCI_CTRL_HISPD;
	}

	if (host->version >= SDHCI_SPEC_300) {
		u16 clk, ctrl_2;

		if (!host->preset_enabled) {
			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
			/*
			 * We only need to set Driver Strength if the
			 * preset value enable is not set.
			 */
			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
			else {
				pr_warn("%s: invalid driver type, default to driver type B\n",
					mmc_hostname(mmc));
				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
			}

			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
		} else {
			/*
			 * According to SDHC Spec v3.00, if the Preset Value
			 * Enable in the Host Control 2 register is set, we
			 * need to reset SD Clock Enable before changing High
1822 			 * Speed Enable to avoid generating clock glitches.
1823 			 */
1824 
1825 			/* Reset SD Clock Enable */
1826 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1827 			clk &= ~SDHCI_CLOCK_CARD_EN;
1828 			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1829 
1830 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1831 
1832 			/* Re-enable SD Clock */
1833 			host->ops->set_clock(host, host->clock);
1834 		}
1835 
1836 		/* Reset SD Clock Enable */
1837 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1838 		clk &= ~SDHCI_CLOCK_CARD_EN;
1839 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1840 
1841 		host->ops->set_uhs_signaling(host, ios->timing);
1842 		host->timing = ios->timing;
1843 
1844 		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
1845 				((ios->timing == MMC_TIMING_UHS_SDR12) ||
1846 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
1847 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
1848 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
1849 				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
1850 				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
1851 			u16 preset;
1852 
1853 			sdhci_enable_preset_value(host, true);
1854 			preset = sdhci_get_preset_value(host);
1855 			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
1856 				>> SDHCI_PRESET_DRV_SHIFT;
1857 		}
1858 
1859 		/* Re-enable SD Clock */
1860 		host->ops->set_clock(host, host->clock);
1861 	} else
1862 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1863 
1864 	/*
1865 	 * Some (ENE) controllers misbehave on some ios operations,
1866 	 * signalling timeout and CRC errors even on CMD0. Resetting
1867 	 * the controller on each ios seems to solve the problem.
1868 	 */
1869 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
1870 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
1871 
1872 	mmiowb();
1873 }
1874 EXPORT_SYMBOL_GPL(sdhci_set_ios);
1875 
1876 static int sdhci_get_cd(struct mmc_host *mmc)
1877 {
1878 	struct sdhci_host *host = mmc_priv(mmc);
1879 	int gpio_cd = mmc_gpio_get_cd(mmc);
1880 
1881 	if (host->flags & SDHCI_DEVICE_DEAD)
1882 		return 0;
1883 
1884 	/* If nonremovable, assume that the card is always present. */
1885 	if (!mmc_card_is_removable(host->mmc))
1886 		return 1;
1887 
1888 	/*
1889 	 * Try slot GPIO detect; if defined, it takes precedence
1890 	 * over the built-in controller functionality.
1891 	 */
1892 	if (gpio_cd >= 0)
1893 		return !!gpio_cd;
1894 
1895 	/* If polling, assume that the card is always present. */
1896 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
1897 		return 1;
1898 
1899 	/* Host native card detect */
1900 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
1901 }
1902 
1903 static int sdhci_check_ro(struct sdhci_host *host)
1904 {
1905 	unsigned long flags;
1906 	int is_readonly;
1907 
1908 	spin_lock_irqsave(&host->lock, flags);
1909 
1910 	if (host->flags & SDHCI_DEVICE_DEAD)
1911 		is_readonly = 0;
1912 	else if (host->ops->get_ro)
1913 		is_readonly = host->ops->get_ro(host);
1914 	else
1915 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
1916 				& SDHCI_WRITE_PROTECT);
1917 
1918 	spin_unlock_irqrestore(&host->lock, flags);
1919 
1920 	/* This quirk needs to be replaced by a callback function later */
1921 	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
1922 		!is_readonly : is_readonly;
1923 }
1924 
1925 #define SAMPLE_COUNT	5
1926 
1927 static int sdhci_get_ro(struct mmc_host *mmc)
1928 {
1929 	struct sdhci_host *host = mmc_priv(mmc);
1930 	int i, ro_count;
1931 
1932 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
1933 		return sdhci_check_ro(host);
1934 
1935 	ro_count = 0;
1936 	for (i = 0; i < SAMPLE_COUNT; i++) {
1937 		if (sdhci_check_ro(host)) {
1938 			if (++ro_count > SAMPLE_COUNT / 2)
1939 				return 1;
1940 		}
1941 		msleep(30);
1942 	}
1943 	return 0;
1944 }
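
/*
 * With SAMPLE_COUNT == 5 the loop above is a simple majority vote: the
 * slot is reported read-only only when more than SAMPLE_COUNT / 2 == 2
 * samples (i.e. at least 3 of 5) read back as read-only, which filters
 * out the unstable reads that SDHCI_QUIRK_UNSTABLE_RO_DETECT hosts are
 * known for.
 */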
1945 
1946 static void sdhci_hw_reset(struct mmc_host *mmc)
1947 {
1948 	struct sdhci_host *host = mmc_priv(mmc);
1949 
1950 	if (host->ops && host->ops->hw_reset)
1951 		host->ops->hw_reset(host);
1952 }
1953 
1954 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
1955 {
1956 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
1957 		if (enable)
1958 			host->ier |= SDHCI_INT_CARD_INT;
1959 		else
1960 			host->ier &= ~SDHCI_INT_CARD_INT;
1961 
1962 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1963 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1964 		mmiowb();
1965 	}
1966 }
1967 
1968 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1969 {
1970 	struct sdhci_host *host = mmc_priv(mmc);
1971 	unsigned long flags;
1972 
1973 	if (enable)
1974 		pm_runtime_get_noresume(host->mmc->parent);
1975 
1976 	spin_lock_irqsave(&host->lock, flags);
1977 	if (enable)
1978 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
1979 	else
1980 		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED;
1981 
1982 	sdhci_enable_sdio_irq_nolock(host, enable);
1983 	spin_unlock_irqrestore(&host->lock, flags);
1984 
1985 	if (!enable)
1986 		pm_runtime_put_noidle(host->mmc->parent);
1987 }
1988 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
1989 
1990 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
1991 				      struct mmc_ios *ios)
1992 {
1993 	struct sdhci_host *host = mmc_priv(mmc);
1994 	u16 ctrl;
1995 	int ret;
1996 
1997 	/*
1998 	 * Signal Voltage Switching is only applicable for Host Controllers
1999 	 * v3.00 and above.
2000 	 */
2001 	if (host->version < SDHCI_SPEC_300)
2002 		return 0;
2003 
2004 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2005 
2006 	switch (ios->signal_voltage) {
2007 	case MMC_SIGNAL_VOLTAGE_330:
2008 		if (!(host->flags & SDHCI_SIGNALING_330))
2009 			return -EINVAL;
2010 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2011 		ctrl &= ~SDHCI_CTRL_VDD_180;
2012 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2013 
2014 		if (!IS_ERR(mmc->supply.vqmmc)) {
2015 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2016 			if (ret) {
2017 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2018 					mmc_hostname(mmc));
2019 				return -EIO;
2020 			}
2021 		}
2022 		/* Wait for 5ms */
2023 		usleep_range(5000, 5500);
2024 
2025 		/* 3.3V regulator output should be stable within 5 ms */
2026 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2027 		if (!(ctrl & SDHCI_CTRL_VDD_180))
2028 			return 0;
2029 
2030 		pr_warn("%s: 3.3V regulator output did not become stable\n",
2031 			mmc_hostname(mmc));
2032 
2033 		return -EAGAIN;
2034 	case MMC_SIGNAL_VOLTAGE_180:
2035 		if (!(host->flags & SDHCI_SIGNALING_180))
2036 			return -EINVAL;
2037 		if (!IS_ERR(mmc->supply.vqmmc)) {
2038 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2039 			if (ret) {
2040 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2041 					mmc_hostname(mmc));
2042 				return -EIO;
2043 			}
2044 		}
2045 
2046 		/*
2047 		 * Enable 1.8V Signal Enable in the Host Control2
2048 		 * register
2049 		 */
2050 		ctrl |= SDHCI_CTRL_VDD_180;
2051 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2052 
2053 		/* Some controllers need to do more when switching */
2054 		if (host->ops->voltage_switch)
2055 			host->ops->voltage_switch(host);
2056 
2057 		/* 1.8V regulator output should be stable within 5 ms */
2058 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2059 		if (ctrl & SDHCI_CTRL_VDD_180)
2060 			return 0;
2061 
2062 		pr_warn("%s: 1.8V regulator output did not become stable\n",
2063 			mmc_hostname(mmc));
2064 
2065 		return -EAGAIN;
2066 	case MMC_SIGNAL_VOLTAGE_120:
2067 		if (!(host->flags & SDHCI_SIGNALING_120))
2068 			return -EINVAL;
2069 		if (!IS_ERR(mmc->supply.vqmmc)) {
2070 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2071 			if (ret) {
2072 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2073 					mmc_hostname(mmc));
2074 				return -EIO;
2075 			}
2076 		}
2077 		return 0;
2078 	default:
2079 		/* No signal voltage switch required */
2080 		return 0;
2081 	}
2082 }
2083 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2084 
2085 static int sdhci_card_busy(struct mmc_host *mmc)
2086 {
2087 	struct sdhci_host *host = mmc_priv(mmc);
2088 	u32 present_state;
2089 
2090 	/* Check whether DAT[0] is 0 */
2091 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2092 
2093 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
2094 }
2095 
2096 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2097 {
2098 	struct sdhci_host *host = mmc_priv(mmc);
2099 	unsigned long flags;
2100 
2101 	spin_lock_irqsave(&host->lock, flags);
2102 	host->flags |= SDHCI_HS400_TUNING;
2103 	spin_unlock_irqrestore(&host->lock, flags);
2104 
2105 	return 0;
2106 }
2107 
2108 void sdhci_start_tuning(struct sdhci_host *host)
2109 {
2110 	u16 ctrl;
2111 
2112 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2113 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2114 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2115 		ctrl |= SDHCI_CTRL_TUNED_CLK;
2116 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2117 
2118 	/*
2119 	 * As per the Host Controller spec v3.00, tuning command
2120 	 * generates Buffer Read Ready interrupt, so enable that.
2121 	 *
2122 	 * Note: The spec clearly says that when the tuning sequence
2123 	 * is being performed, the controller does not generate
2124 	 * interrupts other than the Buffer Read Ready interrupt. But
2125 	 * to make sure we don't hit a controller bug, we _only_
2126 	 * enable Buffer Read Ready interrupt here.
2127 	 */
2128 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2129 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2130 }
2131 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2132 
2133 void sdhci_end_tuning(struct sdhci_host *host)
2134 {
2135 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2136 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2137 }
2138 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2139 
2140 void sdhci_reset_tuning(struct sdhci_host *host)
2141 {
2142 	u16 ctrl;
2143 
2144 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2145 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2146 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2147 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2148 }
2149 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2150 
2151 static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2152 {
2153 	sdhci_reset_tuning(host);
2154 
2155 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2156 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2157 
2158 	sdhci_end_tuning(host);
2159 
2160 	mmc_abort_tuning(host->mmc, opcode);
2161 }
2162 
2163 /*
2164  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit: the
2165  * SDHCI tuning command does not have a data payload (or rather the hardware
2166  * handles it automatically), so mmc_send_tuning() would return -EIO. Also the
2167  * tuning command interrupt setup is different from other commands and there is
2168  * no timeout interrupt, so special handling is needed.
2169  */
2170 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2171 {
2172 	struct mmc_host *mmc = host->mmc;
2173 	struct mmc_command cmd = {};
2174 	struct mmc_request mrq = {};
2175 	unsigned long flags;
2176 	u32 b = host->sdma_boundary;
2177 
2178 	spin_lock_irqsave(&host->lock, flags);
2179 
2180 	cmd.opcode = opcode;
2181 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2182 	cmd.mrq = &mrq;
2183 
2184 	mrq.cmd = &cmd;
2185 	/*
2186 	 * In response to CMD19, the card sends a 64-byte tuning block
2187 	 * to the Host Controller, so we set the block size to 64 here
2188 	 * (CMD21 in 8-bit bus mode uses a 128-byte block instead).
2189 	 */
2190 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2191 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2192 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2193 	else
2194 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2195 
2196 	/*
2197 	 * The tuning block is sent by the card to the host controller.
2198 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2199 	 * This also takes care of setting DMA Enable and Multi Block
2200 	 * Select in the same register to 0.
2201 	 */
2202 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2203 
2204 	sdhci_send_command(host, &cmd);
2205 
2206 	host->cmd = NULL;
2207 
2208 	sdhci_del_timer(host, &mrq);
2209 
2210 	host->tuning_done = 0;
2211 
2212 	mmiowb();
2213 	spin_unlock_irqrestore(&host->lock, flags);
2214 
2215 	/* Wait for Buffer Read Ready interrupt */
2216 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2217 			   msecs_to_jiffies(50));
2218 
2219 }
2220 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2221 
2222 static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2223 {
2224 	int i;
2225 
2226 	/*
2227 	 * Issue the tuning opcode repeatedly until Execute Tuning is cleared
2228 	 * or the number of loops reaches MAX_TUNING_LOOP (40).
2229 	 */
2230 	for (i = 0; i < MAX_TUNING_LOOP; i++) {
2231 		u16 ctrl;
2232 
2233 		sdhci_send_tuning(host, opcode);
2234 
2235 		if (!host->tuning_done) {
2236 			pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
2237 				mmc_hostname(host->mmc));
2238 			sdhci_abort_tuning(host, opcode);
2239 			return;
2240 		}
2241 
2242 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2243 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2244 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2245 				return; /* Success! */
2246 			break;
2247 		}
2248 
2249 		/* Spec does not require a delay between tuning cycles */
2250 		if (host->tuning_delay > 0)
2251 			mdelay(host->tuning_delay);
2252 	}
2253 
2254 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2255 		mmc_hostname(host->mmc));
2256 	sdhci_reset_tuning(host);
2257 }
2258 
2259 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2260 {
2261 	struct sdhci_host *host = mmc_priv(mmc);
2262 	int err = 0;
2263 	unsigned int tuning_count = 0;
2264 	bool hs400_tuning;
2265 
2266 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2267 
2268 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2269 		tuning_count = host->tuning_count;
2270 
2271 	/*
2272 	 * The Host Controller needs tuning in case of SDR104 and DDR50
2273 	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2274 	 * the Capabilities register.
2275 	 * If the Host Controller supports the HS200 mode then the
2276 	 * tuning function has to be executed.
2277 	 */
2278 	switch (host->timing) {
2279 	/* HS400 tuning is done in HS200 mode */
2280 	case MMC_TIMING_MMC_HS400:
2281 		err = -EINVAL;
2282 		goto out;
2283 
2284 	case MMC_TIMING_MMC_HS200:
2285 		/*
2286 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2287 		 * disable it here.
2288 		 */
2289 		if (hs400_tuning)
2290 			tuning_count = 0;
2291 		break;
2292 
2293 	case MMC_TIMING_UHS_SDR104:
2294 	case MMC_TIMING_UHS_DDR50:
2295 		break;
2296 
2297 	case MMC_TIMING_UHS_SDR50:
2298 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2299 			break;
2300 		/* FALLTHROUGH */
2301 
2302 	default:
2303 		goto out;
2304 	}
2305 
2306 	if (host->ops->platform_execute_tuning) {
2307 		err = host->ops->platform_execute_tuning(host, opcode);
2308 		goto out;
2309 	}
2310 
2311 	host->mmc->retune_period = tuning_count;
2312 
2313 	if (host->tuning_delay < 0)
2314 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2315 
2316 	sdhci_start_tuning(host);
2317 
2318 	__sdhci_execute_tuning(host, opcode);
2319 
2320 	sdhci_end_tuning(host);
2321 out:
2322 	host->flags &= ~SDHCI_HS400_TUNING;
2323 
2324 	return err;
2325 }
2326 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
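
/*
 * Illustrative sketch (hypothetical foo_* driver, not part of this
 * file): a glue driver that needs extra steps around tuning can hook
 * ->platform_execute_tuning in its struct sdhci_ops, which
 * sdhci_execute_tuning() above calls instead of the standard loop,
 * while still reusing the exported helpers:
 *
 *	static int foo_execute_tuning(struct sdhci_host *host, u32 opcode)
 *	{
 *		foo_select_sample_phase(host);	(assumed platform step)
 *		sdhci_start_tuning(host);
 *		sdhci_send_tuning(host, opcode);
 *		sdhci_end_tuning(host);
 *		return host->tuning_done ? 0 : -EIO;
 *	}
 *
 *	static const struct sdhci_ops foo_sdhci_ops = {
 *		.platform_execute_tuning = foo_execute_tuning,
 *	};
 */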
2327 
2328 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2329 {
2330 	/* Host Controller v3.00 defines preset value registers */
2331 	if (host->version < SDHCI_SPEC_300)
2332 		return;
2333 
2334 	/*
2335 	 * Only change the Preset Value enable state when the requested
2336 	 * state differs from the current one; otherwise bail out.
2337 	 */
2338 	if (host->preset_enabled != enable) {
2339 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2340 
2341 		if (enable)
2342 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2343 		else
2344 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2345 
2346 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2347 
2348 		if (enable)
2349 			host->flags |= SDHCI_PV_ENABLED;
2350 		else
2351 			host->flags &= ~SDHCI_PV_ENABLED;
2352 
2353 		host->preset_enabled = enable;
2354 	}
2355 }
2356 
2357 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2358 				int err)
2359 {
2360 	struct sdhci_host *host = mmc_priv(mmc);
2361 	struct mmc_data *data = mrq->data;
2362 
2363 	if (data->host_cookie != COOKIE_UNMAPPED)
2364 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2365 			     mmc_get_dma_dir(data));
2366 
2367 	data->host_cookie = COOKIE_UNMAPPED;
2368 }
2369 
2370 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2371 {
2372 	struct sdhci_host *host = mmc_priv(mmc);
2373 
2374 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2375 
2376 	/*
2377 	 * No pre-mapping in the pre hook if we're using the bounce buffer;
2378 	 * for that we would need two bounce buffers, since one buffer is
2379 	 * in flight when this is called.
2380 	 */
2381 	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2382 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2383 }
2384 
2385 static inline bool sdhci_has_requests(struct sdhci_host *host)
2386 {
2387 	return host->cmd || host->data_cmd;
2388 }
2389 
2390 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2391 {
2392 	if (host->data_cmd) {
2393 		host->data_cmd->error = err;
2394 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2395 	}
2396 
2397 	if (host->cmd) {
2398 		host->cmd->error = err;
2399 		sdhci_finish_mrq(host, host->cmd->mrq);
2400 	}
2401 }
2402 
2403 static void sdhci_card_event(struct mmc_host *mmc)
2404 {
2405 	struct sdhci_host *host = mmc_priv(mmc);
2406 	unsigned long flags;
2407 	int present;
2408 
2409 	/* First check if client has provided their own card event */
2410 	if (host->ops->card_event)
2411 		host->ops->card_event(host);
2412 
2413 	present = mmc->ops->get_cd(mmc);
2414 
2415 	spin_lock_irqsave(&host->lock, flags);
2416 
2417 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2418 	if (sdhci_has_requests(host) && !present) {
2419 		pr_err("%s: Card removed during transfer!\n",
2420 			mmc_hostname(host->mmc));
2421 		pr_err("%s: Resetting controller.\n",
2422 			mmc_hostname(host->mmc));
2423 
2424 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2425 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2426 
2427 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2428 	}
2429 
2430 	spin_unlock_irqrestore(&host->lock, flags);
2431 }
2432 
2433 static const struct mmc_host_ops sdhci_ops = {
2434 	.request	= sdhci_request,
2435 	.post_req	= sdhci_post_req,
2436 	.pre_req	= sdhci_pre_req,
2437 	.set_ios	= sdhci_set_ios,
2438 	.get_cd		= sdhci_get_cd,
2439 	.get_ro		= sdhci_get_ro,
2440 	.hw_reset	= sdhci_hw_reset,
2441 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2442 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2443 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2444 	.execute_tuning			= sdhci_execute_tuning,
2445 	.card_event			= sdhci_card_event,
2446 	.card_busy	= sdhci_card_busy,
2447 };
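
/*
 * sdhci_ops is copied into host->mmc_host_ops by sdhci_alloc_host(), so
 * a glue driver can override individual mmc callbacks for its own host
 * without touching this shared table. A minimal, hypothetical sketch:
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	host->mmc_host_ops.get_ro = foo_get_ro;
 *	host->mmc_host_ops.hs400_enhanced_strobe = foo_hs400_es;
 */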
2448 
2449 /*****************************************************************************\
2450  *                                                                           *
2451  * Tasklets                                                                  *
2452  *                                                                           *
2453 \*****************************************************************************/
2454 
2455 static bool sdhci_request_done(struct sdhci_host *host)
2456 {
2457 	unsigned long flags;
2458 	struct mmc_request *mrq;
2459 	int i;
2460 
2461 	spin_lock_irqsave(&host->lock, flags);
2462 
2463 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2464 		mrq = host->mrqs_done[i];
2465 		if (mrq)
2466 			break;
2467 	}
2468 
2469 	if (!mrq) {
2470 		spin_unlock_irqrestore(&host->lock, flags);
2471 		return true;
2472 	}
2473 
2474 	sdhci_del_timer(host, mrq);
2475 
2476 	/*
2477 	 * Always unmap the data buffers if they were mapped by
2478 	 * sdhci_prepare_data() whenever we finish with a request.
2479 	 * This avoids leaking DMA mappings on error.
2480 	 */
2481 	if (host->flags & SDHCI_REQ_USE_DMA) {
2482 		struct mmc_data *data = mrq->data;
2483 
2484 		if (data && data->host_cookie == COOKIE_MAPPED) {
2485 			if (host->bounce_buffer) {
2486 				/*
2487 				 * On reads, copy the bounced data into the
2488 				 * sglist
2489 				 */
2490 				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2491 					unsigned int length = data->bytes_xfered;
2492 
2493 					if (length > host->bounce_buffer_size) {
2494 						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2495 						       mmc_hostname(host->mmc),
2496 						       host->bounce_buffer_size,
2497 						       data->bytes_xfered);
2498 						/* Cap it down and continue */
2499 						length = host->bounce_buffer_size;
2500 					}
2501 					dma_sync_single_for_cpu(
2502 						host->mmc->parent,
2503 						host->bounce_addr,
2504 						host->bounce_buffer_size,
2505 						DMA_FROM_DEVICE);
2506 					sg_copy_from_buffer(data->sg,
2507 						data->sg_len,
2508 						host->bounce_buffer,
2509 						length);
2510 				} else {
2511 					/* No copying, just switch ownership */
2512 					dma_sync_single_for_cpu(
2513 						host->mmc->parent,
2514 						host->bounce_addr,
2515 						host->bounce_buffer_size,
2516 						mmc_get_dma_dir(data));
2517 				}
2518 			} else {
2519 				/* Unmap the raw data */
2520 				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2521 					     data->sg_len,
2522 					     mmc_get_dma_dir(data));
2523 			}
2524 			data->host_cookie = COOKIE_UNMAPPED;
2525 		}
2526 	}
2527 
2528 	/*
2529 	 * The controller needs a reset of internal state machines
2530 	 * upon error conditions.
2531 	 */
2532 	if (sdhci_needs_reset(host, mrq)) {
2533 		/*
2534 		 * Do not finish until command and data lines are available for
2535 		 * reset. Note there can only be one other mrq, so it cannot
2536 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2537 		 * would both be null.
2538 		 */
2539 		if (host->cmd || host->data_cmd) {
2540 			spin_unlock_irqrestore(&host->lock, flags);
2541 			return true;
2542 		}
2543 
2544 		/* Some controllers need this kick or reset won't work here */
2545 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2546 			/* This is to force an update */
2547 			host->ops->set_clock(host, host->clock);
2548 
2549 		/* Spec says we should do both at the same time, but Ricoh
2550 		   controllers do not like that. */
2551 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2552 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2553 
2554 		host->pending_reset = false;
2555 	}
2556 
2557 	if (!sdhci_has_requests(host))
2558 		sdhci_led_deactivate(host);
2559 
2560 	host->mrqs_done[i] = NULL;
2561 
2562 	mmiowb();
2563 	spin_unlock_irqrestore(&host->lock, flags);
2564 
2565 	mmc_request_done(host->mmc, mrq);
2566 
2567 	return false;
2568 }
2569 
2570 static void sdhci_tasklet_finish(unsigned long param)
2571 {
2572 	struct sdhci_host *host = (struct sdhci_host *)param;
2573 
2574 	while (!sdhci_request_done(host))
2575 		;
2576 }
2577 
2578 static void sdhci_timeout_timer(struct timer_list *t)
2579 {
2580 	struct sdhci_host *host;
2581 	unsigned long flags;
2582 
2583 	host = from_timer(host, t, timer);
2584 
2585 	spin_lock_irqsave(&host->lock, flags);
2586 
2587 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2588 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2589 		       mmc_hostname(host->mmc));
2590 		sdhci_dumpregs(host);
2591 
2592 		host->cmd->error = -ETIMEDOUT;
2593 		sdhci_finish_mrq(host, host->cmd->mrq);
2594 	}
2595 
2596 	mmiowb();
2597 	spin_unlock_irqrestore(&host->lock, flags);
2598 }
2599 
2600 static void sdhci_timeout_data_timer(struct timer_list *t)
2601 {
2602 	struct sdhci_host *host;
2603 	unsigned long flags;
2604 
2605 	host = from_timer(host, t, data_timer);
2606 
2607 	spin_lock_irqsave(&host->lock, flags);
2608 
2609 	if (host->data || host->data_cmd ||
2610 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2611 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2612 		       mmc_hostname(host->mmc));
2613 		sdhci_dumpregs(host);
2614 
2615 		if (host->data) {
2616 			host->data->error = -ETIMEDOUT;
2617 			sdhci_finish_data(host);
2618 		} else if (host->data_cmd) {
2619 			host->data_cmd->error = -ETIMEDOUT;
2620 			sdhci_finish_mrq(host, host->data_cmd->mrq);
2621 		} else {
2622 			host->cmd->error = -ETIMEDOUT;
2623 			sdhci_finish_mrq(host, host->cmd->mrq);
2624 		}
2625 	}
2626 
2627 	mmiowb();
2628 	spin_unlock_irqrestore(&host->lock, flags);
2629 }
2630 
2631 /*****************************************************************************\
2632  *                                                                           *
2633  * Interrupt handling                                                        *
2634  *                                                                           *
2635 \*****************************************************************************/
2636 
2637 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
2638 {
2639 	if (!host->cmd) {
2640 		/*
2641 		 * SDHCI recovers from errors by resetting the cmd and data
2642 		 * circuits.  Until that is done, there very well might be more
2643 		 * interrupts, so ignore them in that case.
2644 		 */
2645 		if (host->pending_reset)
2646 			return;
2647 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
2648 		       mmc_hostname(host->mmc), (unsigned)intmask);
2649 		sdhci_dumpregs(host);
2650 		return;
2651 	}
2652 
2653 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
2654 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
2655 		if (intmask & SDHCI_INT_TIMEOUT)
2656 			host->cmd->error = -ETIMEDOUT;
2657 		else
2658 			host->cmd->error = -EILSEQ;
2659 
2660 		/*
2661 		 * If this command initiates a data phase and a response
2662 		 * CRC error is signalled, the card can start transferring
2663 		 * data - the card may have received the command without
2664 		 * error.  We must not terminate the mmc_request early.
2665 		 *
2666 		 * If the card did not receive the command or returned an
2667 		 * error which prevented it sending data, the data phase
2668 		 * will time out.
2669 		 */
2670 		if (host->cmd->data &&
2671 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
2672 		     SDHCI_INT_CRC) {
2673 			host->cmd = NULL;
2674 			return;
2675 		}
2676 
2677 		sdhci_finish_mrq(host, host->cmd->mrq);
2678 		return;
2679 	}
2680 
2681 	if (intmask & SDHCI_INT_RESPONSE)
2682 		sdhci_finish_command(host);
2683 }
2684 
2685 static void sdhci_adma_show_error(struct sdhci_host *host)
2686 {
2687 	void *desc = host->adma_table;
2688 
2689 	sdhci_dumpregs(host);
2690 
2691 	while (true) {
2692 		struct sdhci_adma2_64_desc *dma_desc = desc;
2693 
2694 		if (host->flags & SDHCI_USE_64_BIT_DMA)
2695 			DBG("%p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
2696 			    desc, le32_to_cpu(dma_desc->addr_hi),
2697 			    le32_to_cpu(dma_desc->addr_lo),
2698 			    le16_to_cpu(dma_desc->len),
2699 			    le16_to_cpu(dma_desc->cmd));
2700 		else
2701 			DBG("%p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
2702 			    desc, le32_to_cpu(dma_desc->addr_lo),
2703 			    le16_to_cpu(dma_desc->len),
2704 			    le16_to_cpu(dma_desc->cmd));
2705 
2706 		desc += host->desc_sz;
2707 
2708 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
2709 			break;
2710 	}
2711 }
2712 
2713 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
2714 {
2715 	u32 command;
2716 
2717 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
2718 	if (intmask & SDHCI_INT_DATA_AVAIL) {
2719 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
2720 		if (command == MMC_SEND_TUNING_BLOCK ||
2721 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
2722 			host->tuning_done = 1;
2723 			wake_up(&host->buf_ready_int);
2724 			return;
2725 		}
2726 	}
2727 
2728 	if (!host->data) {
2729 		struct mmc_command *data_cmd = host->data_cmd;
2730 
2731 		/*
2732 		 * The "data complete" interrupt is also used to
2733 		 * indicate that a busy state has ended. See comment
2734 		 * above in sdhci_cmd_irq().
2735 		 */
2736 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
2737 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
2738 				host->data_cmd = NULL;
2739 				data_cmd->error = -ETIMEDOUT;
2740 				sdhci_finish_mrq(host, data_cmd->mrq);
2741 				return;
2742 			}
2743 			if (intmask & SDHCI_INT_DATA_END) {
2744 				host->data_cmd = NULL;
2745 				/*
2746 				 * Some cards handle busy-end interrupt
2747 				 * before the command completed, so make
2748 				 * sure we do things in the proper order.
2749 				 */
2750 				if (host->cmd == data_cmd)
2751 					return;
2752 
2753 				sdhci_finish_mrq(host, data_cmd->mrq);
2754 				return;
2755 			}
2756 		}
2757 
2758 		/*
2759 		 * SDHCI recovers from errors by resetting the cmd and data
2760 		 * circuits. Until that is done, there very well might be more
2761 		 * interrupts, so ignore them in that case.
2762 		 */
2763 		if (host->pending_reset)
2764 			return;
2765 
2766 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
2767 		       mmc_hostname(host->mmc), (unsigned)intmask);
2768 		sdhci_dumpregs(host);
2769 
2770 		return;
2771 	}
2772 
2773 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
2774 		host->data->error = -ETIMEDOUT;
2775 	else if (intmask & SDHCI_INT_DATA_END_BIT)
2776 		host->data->error = -EILSEQ;
2777 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
2778 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
2779 			!= MMC_BUS_TEST_R)
2780 		host->data->error = -EILSEQ;
2781 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
2782 		pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
2783 		sdhci_adma_show_error(host);
2784 		host->data->error = -EIO;
2785 		if (host->ops->adma_workaround)
2786 			host->ops->adma_workaround(host, intmask);
2787 	}
2788 
2789 	if (host->data->error)
2790 		sdhci_finish_data(host);
2791 	else {
2792 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
2793 			sdhci_transfer_pio(host);
2794 
2795 		/*
2796 		 * We currently don't do anything fancy with DMA
2797 		 * boundaries, but as we can't disable the feature
2798 		 * we need to at least restart the transfer.
2799 		 *
2800 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
2801 		 * should return a valid address to continue from, but as
2802 		 * some controllers are faulty, don't trust them.
2803 		 */
2804 		if (intmask & SDHCI_INT_DMA_END) {
2805 			u32 dmastart, dmanow;
2806 
2807 			dmastart = sdhci_sdma_address(host);
2808 			dmanow = dmastart + host->data->bytes_xfered;
2809 			/*
2810 			 * Force update to the next DMA block boundary.
2811 			 */
2812 			dmanow = (dmanow &
2813 				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
2814 				SDHCI_DEFAULT_BOUNDARY_SIZE;
2815 			host->data->bytes_xfered = dmanow - dmastart;
2816 			DBG("DMA base 0x%08x, transferred 0x%06x bytes, next 0x%08x\n",
2817 			    dmastart, host->data->bytes_xfered, dmanow);
2818 			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS);
2819 		}
2820 
2821 		if (intmask & SDHCI_INT_DATA_END) {
2822 			if (host->cmd == host->data_cmd) {
2823 				/*
2824 				 * Data managed to finish before the
2825 				 * command completed. Make sure we do
2826 				 * things in the proper order.
2827 				 */
2828 				host->data_early = 1;
2829 			} else {
2830 				sdhci_finish_data(host);
2831 			}
2832 		}
2833 	}
2834 }
2835 
2836 static irqreturn_t sdhci_irq(int irq, void *dev_id)
2837 {
2838 	irqreturn_t result = IRQ_NONE;
2839 	struct sdhci_host *host = dev_id;
2840 	u32 intmask, mask, unexpected = 0;
2841 	int max_loops = 16;
2842 
2843 	spin_lock(&host->lock);
2844 
2845 	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) {
2846 		spin_unlock(&host->lock);
2847 		return IRQ_NONE;
2848 	}
2849 
2850 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2851 	if (!intmask || intmask == 0xffffffff) {
2852 		result = IRQ_NONE;
2853 		goto out;
2854 	}
2855 
2856 	do {
2857 		DBG("IRQ status 0x%08x\n", intmask);
2858 
2859 		if (host->ops->irq) {
2860 			intmask = host->ops->irq(host, intmask);
2861 			if (!intmask)
2862 				goto cont;
2863 		}
2864 
2865 		/* Clear selected interrupts. */
2866 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2867 				  SDHCI_INT_BUS_POWER);
2868 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
2869 
2870 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2871 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
2872 				      SDHCI_CARD_PRESENT;
2873 
2874 			/*
2875 			 * There is an observation on i.MX eSDHC: the INSERT
2876 			 * bit will be set again immediately after being
2877 			 * cleared if a card is inserted.  We have to mask
2878 			 * the irq to prevent an interrupt storm that would
2879 			 * freeze the system.  The REMOVE bit behaves the
2880 			 * same way.
2881 			 *
2882 			 * More testing is needed here to ensure it works
2883 			 * for other platforms though.
2884 			 */
2885 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
2886 				       SDHCI_INT_CARD_REMOVE);
2887 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
2888 					       SDHCI_INT_CARD_INSERT;
2889 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2890 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2891 
2892 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
2893 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
2894 
2895 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
2896 						       SDHCI_INT_CARD_REMOVE);
2897 			result = IRQ_WAKE_THREAD;
2898 		}
2899 
2900 		if (intmask & SDHCI_INT_CMD_MASK)
2901 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK);
2902 
2903 		if (intmask & SDHCI_INT_DATA_MASK)
2904 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
2905 
2906 		if (intmask & SDHCI_INT_BUS_POWER)
2907 			pr_err("%s: Card is consuming too much power!\n",
2908 				mmc_hostname(host->mmc));
2909 
2910 		if (intmask & SDHCI_INT_RETUNE)
2911 			mmc_retune_needed(host->mmc);
2912 
2913 		if ((intmask & SDHCI_INT_CARD_INT) &&
2914 		    (host->ier & SDHCI_INT_CARD_INT)) {
2915 			sdhci_enable_sdio_irq_nolock(host, false);
2916 			host->thread_isr |= SDHCI_INT_CARD_INT;
2917 			result = IRQ_WAKE_THREAD;
2918 		}
2919 
2920 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
2921 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
2922 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
2923 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
2924 
2925 		if (intmask) {
2926 			unexpected |= intmask;
2927 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
2928 		}
2929 cont:
2930 		if (result == IRQ_NONE)
2931 			result = IRQ_HANDLED;
2932 
2933 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
2934 	} while (intmask && --max_loops);
2935 out:
2936 	spin_unlock(&host->lock);
2937 
2938 	if (unexpected) {
2939 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
2940 			   mmc_hostname(host->mmc), unexpected);
2941 		sdhci_dumpregs(host);
2942 	}
2943 
2944 	return result;
2945 }
2946 
2947 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
2948 {
2949 	struct sdhci_host *host = dev_id;
2950 	unsigned long flags;
2951 	u32 isr;
2952 
2953 	spin_lock_irqsave(&host->lock, flags);
2954 	isr = host->thread_isr;
2955 	host->thread_isr = 0;
2956 	spin_unlock_irqrestore(&host->lock, flags);
2957 
2958 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
2959 		struct mmc_host *mmc = host->mmc;
2960 
2961 		mmc->ops->card_event(mmc);
2962 		mmc_detect_change(mmc, msecs_to_jiffies(200));
2963 	}
2964 
2965 	if (isr & SDHCI_INT_CARD_INT) {
2966 		sdio_run_irqs(host->mmc);
2967 
2968 		spin_lock_irqsave(&host->lock, flags);
2969 		if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
2970 			sdhci_enable_sdio_irq_nolock(host, true);
2971 		spin_unlock_irqrestore(&host->lock, flags);
2972 	}
2973 
2974 	return isr ? IRQ_HANDLED : IRQ_NONE;
2975 }
2976 
2977 /*****************************************************************************\
2978  *                                                                           *
2979  * Suspend/resume                                                            *
2980  *                                                                           *
2981 \*****************************************************************************/
2982 
2983 #ifdef CONFIG_PM
2984 
2985 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
2986 {
2987 	return mmc_card_is_removable(host->mmc) &&
2988 	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
2989 	       !mmc_can_gpio_cd(host->mmc);
2990 }
2991 
2992 /*
2993  * To enable wakeup events, the corresponding events have to be enabled in
2994  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
2995  * Table' in the SD Host Controller Standard Specification.
2996  * It is useless to restore SDHCI_INT_ENABLE state in
2997  * sdhci_disable_irq_wakeups() since it will be set by
2998  * sdhci_enable_card_detection() or sdhci_init().
2999  */
3000 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3001 {
3002 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3003 		  SDHCI_WAKE_ON_INT;
3004 	u32 irq_val = 0;
3005 	u8 wake_val = 0;
3006 	u8 val;
3007 
3008 	if (sdhci_cd_irq_can_wakeup(host)) {
3009 		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3010 		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3011 	}
3012 
3013 	if (mmc_card_wake_sdio_irq(host->mmc)) {
3014 		wake_val |= SDHCI_WAKE_ON_INT;
3015 		irq_val |= SDHCI_INT_CARD_INT;
3016 	}
3017 
3018 	if (!irq_val)
3019 		return false;
3020 
3021 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3022 	val &= ~mask;
3023 	val |= wake_val;
3024 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3025 
3026 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3027 
3028 	host->irq_wake_enabled = !enable_irq_wake(host->irq);
3029 
3030 	return host->irq_wake_enabled;
3031 }
3032 
3033 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3034 {
3035 	u8 val;
3036 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3037 			| SDHCI_WAKE_ON_INT;
3038 
3039 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3040 	val &= ~mask;
3041 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3042 
3043 	disable_irq_wake(host->irq);
3044 
3045 	host->irq_wake_enabled = false;
3046 }
3047 
3048 int sdhci_suspend_host(struct sdhci_host *host)
3049 {
3050 	sdhci_disable_card_detection(host);
3051 
3052 	mmc_retune_timer_stop(host->mmc);
3053 
3054 	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3055 	    !sdhci_enable_irq_wakeups(host)) {
3056 		host->ier = 0;
3057 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3058 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3059 		free_irq(host->irq, host);
3060 	}
3061 
3062 	return 0;
3063 }
3064 
3065 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3066 
3067 int sdhci_resume_host(struct sdhci_host *host)
3068 {
3069 	struct mmc_host *mmc = host->mmc;
3070 	int ret = 0;
3071 
3072 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3073 		if (host->ops->enable_dma)
3074 			host->ops->enable_dma(host);
3075 	}
3076 
3077 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3078 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3079 		/* Card keeps power but host controller does not */
3080 		sdhci_init(host, 0);
3081 		host->pwr = 0;
3082 		host->clock = 0;
3083 		mmc->ops->set_ios(mmc, &mmc->ios);
3084 	} else {
3085 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3086 		mmiowb();
3087 	}
3088 
3089 	if (host->irq_wake_enabled) {
3090 		sdhci_disable_irq_wakeups(host);
3091 	} else {
3092 		ret = request_threaded_irq(host->irq, sdhci_irq,
3093 					   sdhci_thread_irq, IRQF_SHARED,
3094 					   mmc_hostname(host->mmc), host);
3095 		if (ret)
3096 			return ret;
3097 	}
3098 
3099 	sdhci_enable_card_detection(host);
3100 
3101 	return ret;
3102 }
3103 
3104 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3105 
3106 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3107 {
3108 	unsigned long flags;
3109 
3110 	mmc_retune_timer_stop(host->mmc);
3111 
3112 	spin_lock_irqsave(&host->lock, flags);
3113 	host->ier &= SDHCI_INT_CARD_INT;
3114 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3115 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3116 	spin_unlock_irqrestore(&host->lock, flags);
3117 
3118 	synchronize_hardirq(host->irq);
3119 
3120 	spin_lock_irqsave(&host->lock, flags);
3121 	host->runtime_suspended = true;
3122 	spin_unlock_irqrestore(&host->lock, flags);
3123 
3124 	return 0;
3125 }
3126 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3127 
3128 int sdhci_runtime_resume_host(struct sdhci_host *host)
3129 {
3130 	struct mmc_host *mmc = host->mmc;
3131 	unsigned long flags;
3132 	int host_flags = host->flags;
3133 
3134 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3135 		if (host->ops->enable_dma)
3136 			host->ops->enable_dma(host);
3137 	}
3138 
3139 	sdhci_init(host, 0);
3140 
3141 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3142 	    mmc->ios.power_mode != MMC_POWER_OFF) {
3143 		/* Force clock and power re-program */
3144 		host->pwr = 0;
3145 		host->clock = 0;
3146 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3147 		mmc->ops->set_ios(mmc, &mmc->ios);
3148 
3149 		if ((host_flags & SDHCI_PV_ENABLED) &&
3150 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3151 			spin_lock_irqsave(&host->lock, flags);
3152 			sdhci_enable_preset_value(host, true);
3153 			spin_unlock_irqrestore(&host->lock, flags);
3154 		}
3155 
3156 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3157 		    mmc->ops->hs400_enhanced_strobe)
3158 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3159 	}
3160 
3161 	spin_lock_irqsave(&host->lock, flags);
3162 
3163 	host->runtime_suspended = false;
3164 
3165 	/* Enable SDIO IRQ */
3166 	if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
3167 		sdhci_enable_sdio_irq_nolock(host, true);
3168 
3169 	/* Enable Card Detection */
3170 	sdhci_enable_card_detection(host);
3171 
3172 	spin_unlock_irqrestore(&host->lock, flags);
3173 
3174 	return 0;
3175 }
3176 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
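
/*
 * The suspend/resume helpers above are meant to be wired into a glue
 * driver's dev_pm_ops. A hedged sketch (all foo_* names are
 * illustrative):
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 *
 * where foo_suspend() ends by calling sdhci_suspend_host(), foo_resume()
 * calls sdhci_resume_host(), and the runtime callbacks wrap
 * sdhci_runtime_suspend_host()/sdhci_runtime_resume_host() around any
 * platform-specific clock gating.
 */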
3177 
3178 #endif /* CONFIG_PM */
3179 
3180 /*****************************************************************************\
3181  *                                                                           *
3182  * Command Queue Engine (CQE) helpers                                        *
3183  *                                                                           *
3184 \*****************************************************************************/
3185 
3186 void sdhci_cqe_enable(struct mmc_host *mmc)
3187 {
3188 	struct sdhci_host *host = mmc_priv(mmc);
3189 	unsigned long flags;
3190 	u8 ctrl;
3191 
3192 	spin_lock_irqsave(&host->lock, flags);
3193 
3194 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3195 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3196 	if (host->flags & SDHCI_USE_64_BIT_DMA)
3197 		ctrl |= SDHCI_CTRL_ADMA64;
3198 	else
3199 		ctrl |= SDHCI_CTRL_ADMA32;
3200 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3201 
3202 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3203 		     SDHCI_BLOCK_SIZE);
3204 
3205 	/* Set maximum timeout */
3206 	sdhci_writeb(host, 0xE, SDHCI_TIMEOUT_CONTROL);
3207 
3208 	host->ier = host->cqe_ier;
3209 
3210 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3211 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3212 
3213 	host->cqe_on = true;
3214 
3215 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3216 		 mmc_hostname(mmc), host->ier,
3217 		 sdhci_readl(host, SDHCI_INT_STATUS));
3218 
3219 	mmiowb();
3220 	spin_unlock_irqrestore(&host->lock, flags);
3221 }
3222 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3223 
3224 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3225 {
3226 	struct sdhci_host *host = mmc_priv(mmc);
3227 	unsigned long flags;
3228 
3229 	spin_lock_irqsave(&host->lock, flags);
3230 
3231 	sdhci_set_default_irqs(host);
3232 
3233 	host->cqe_on = false;
3234 
3235 	if (recovery) {
3236 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3237 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3238 	}
3239 
3240 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3241 		 mmc_hostname(mmc), host->ier,
3242 		 sdhci_readl(host, SDHCI_INT_STATUS));
3243 
3244 	mmiowb();
3245 	spin_unlock_irqrestore(&host->lock, flags);
3246 }
3247 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3248 
3249 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3250 		   int *data_error)
3251 {
3252 	u32 mask;
3253 
3254 	if (!host->cqe_on)
3255 		return false;
3256 
3257 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3258 		*cmd_error = -EILSEQ;
3259 	else if (intmask & SDHCI_INT_TIMEOUT)
3260 		*cmd_error = -ETIMEDOUT;
3261 	else
3262 		*cmd_error = 0;
3263 
3264 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3265 		*data_error = -EILSEQ;
3266 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3267 		*data_error = -ETIMEDOUT;
3268 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3269 		*data_error = -EIO;
3270 	else
3271 		*data_error = 0;
3272 
3273 	/* Clear selected interrupts. */
3274 	mask = intmask & host->cqe_ier;
3275 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3276 
3277 	if (intmask & SDHCI_INT_BUS_POWER)
3278 		pr_err("%s: Card is consuming too much power!\n",
3279 		       mmc_hostname(host->mmc));
3280 
3281 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3282 	if (intmask) {
3283 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3284 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3285 		       mmc_hostname(host->mmc), intmask);
3286 		sdhci_dumpregs(host);
3287 	}
3288 
3289 	return true;
3290 }
3291 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
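
/*
 * A CQE-capable glue driver is expected to route interrupts through
 * sdhci_cqe_irq() from its ->irq hook. Illustrative sketch, assuming
 * the separate cqhci library (the foo_* name is hypothetical):
 *
 *	static u32 foo_cqhci_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		(not a CQE interrupt: fall back to normal sdhci handling)
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */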
3292 
3293 /*****************************************************************************\
3294  *                                                                           *
3295  * Device allocation/registration                                            *
3296  *                                                                           *
3297 \*****************************************************************************/
3298 
3299 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3300 	size_t priv_size)
3301 {
3302 	struct mmc_host *mmc;
3303 	struct sdhci_host *host;
3304 
3305 	WARN_ON(dev == NULL);
3306 
3307 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3308 	if (!mmc)
3309 		return ERR_PTR(-ENOMEM);
3310 
3311 	host = mmc_priv(mmc);
3312 	host->mmc = mmc;
3313 	host->mmc_host_ops = sdhci_ops;
3314 	mmc->ops = &host->mmc_host_ops;
3315 
3316 	host->flags = SDHCI_SIGNALING_330;
3317 
3318 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3319 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3320 
3321 	host->tuning_delay = -1;
3322 
3323 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3324 
3325 	return host;
3326 }
3327 
3328 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
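
/*
 * Typical probe-time flow for a platform glue driver, sketched with
 * hypothetical foo_* names (error handling omitted for brevity):
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(struct foo_priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	host->ioaddr = devm_ioremap_resource(&pdev->dev, res);
 *	host->irq = platform_get_irq(pdev, 0);
 *	host->ops = &foo_sdhci_ops;
 *
 *	ret = sdhci_add_host(host);
 */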
3329 
3330 static int sdhci_set_dma_mask(struct sdhci_host *host)
3331 {
3332 	struct mmc_host *mmc = host->mmc;
3333 	struct device *dev = mmc_dev(mmc);
3334 	int ret = -EINVAL;
3335 
3336 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3337 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3338 
3339 	/* Try 64-bit mask if hardware is capable of it */
3340 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3341 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3342 		if (ret) {
3343 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3344 				mmc_hostname(mmc));
3345 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3346 		}
3347 	}
3348 
3349 	/* 32-bit mask as default & fallback */
3350 	if (ret) {
3351 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3352 		if (ret)
3353 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3354 				mmc_hostname(mmc));
3355 	}
3356 
3357 	return ret;
3358 }
3359 
3360 void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
3361 {
3362 	u16 v;
3363 	u64 dt_caps_mask = 0;
3364 	u64 dt_caps = 0;
3365 
3366 	if (host->read_caps)
3367 		return;
3368 
3369 	host->read_caps = true;
3370 
3371 	if (debug_quirks)
3372 		host->quirks = debug_quirks;
3373 
3374 	if (debug_quirks2)
3375 		host->quirks2 = debug_quirks2;
3376 
3377 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3378 
3379 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3380 			     "sdhci-caps-mask", &dt_caps_mask);
3381 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3382 			     "sdhci-caps", &dt_caps);
3383 
3384 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3385 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3386 
3387 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3388 		return;
3389 
3390 	if (caps) {
3391 		host->caps = *caps;
3392 	} else {
3393 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3394 		host->caps &= ~lower_32_bits(dt_caps_mask);
3395 		host->caps |= lower_32_bits(dt_caps);
3396 	}
3397 
3398 	if (host->version < SDHCI_SPEC_300)
3399 		return;
3400 
3401 	if (caps1) {
3402 		host->caps1 = *caps1;
3403 	} else {
3404 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3405 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3406 		host->caps1 |= upper_32_bits(dt_caps);
3407 	}
3408 }
3409 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
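
/*
 * The sdhci-caps-mask/sdhci-caps handling above packs both capability
 * registers into one 64-bit value: the upper 32 bits patch
 * SDHCI_CAPABILITIES_1 and the lower 32 bits patch SDHCI_CAPABILITIES.
 * For example, a board whose controller falsely advertises DDR50
 * support could mask it out with a DT fragment along these lines
 * (illustrative; DDR50 support is bit 2 of the upper word):
 *
 *	&foo_sdhci {
 *		sdhci-caps-mask = <0x00000004 0x00000000>;
 *	};
 */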
3410 
3411 static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3412 {
3413 	struct mmc_host *mmc = host->mmc;
3414 	unsigned int max_blocks;
3415 	unsigned int bounce_size;
3416 	int ret;
3417 
3418 	/*
3419 	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3420 	 * has diminishing returns, probably because SD/MMC cards are
3421 	 * usually optimized to handle requests of this size.
3422 	 */
3423 	bounce_size = SZ_64K;
3424 	/*
3425 	 * Adjust the bounce buffer downwards to the maximum request size
3426 	 * if that is smaller than the buffer; otherwise the maximum
3427 	 * request size is capped to the bounce buffer size further down.
3428 	 */
3429 	if (mmc->max_req_size < bounce_size)
3430 		bounce_size = mmc->max_req_size;
3431 	max_blocks = bounce_size / 512;
3432 
3433 	/*
3434 	 * When we support just one segment, we can get significant
3435 	 * speedups with the help of a bounce buffer that groups
3436 	 * scattered reads/writes together.
3437 	 */
3438 	host->bounce_buffer = devm_kmalloc(mmc->parent,
3439 					   bounce_size,
3440 					   GFP_KERNEL);
3441 	if (!host->bounce_buffer) {
3442 		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3443 		       mmc_hostname(mmc),
3444 		       bounce_size);
3445 		/*
3446 		 * Exiting with zero here makes sure we proceed with
3447 		 * mmc->max_segs == 1.
3448 		 */
3449 		return 0;
3450 	}
3451 
3452 	host->bounce_addr = dma_map_single(mmc->parent,
3453 					   host->bounce_buffer,
3454 					   bounce_size,
3455 					   DMA_BIDIRECTIONAL);
3456 	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3457 	if (ret)
3458 		/* Again fall back to max_segs == 1 */
3459 		return 0;
3460 	host->bounce_buffer_size = bounce_size;
3461 
3462 	/* Lie about this since we're bouncing */
3463 	mmc->max_segs = max_blocks;
3464 	mmc->max_seg_size = bounce_size;
3465 	mmc->max_req_size = bounce_size;
3466 
3467 	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
3468 		mmc_hostname(mmc), max_blocks, bounce_size);
3469 
3470 	return 0;
3471 }
3472 
3473 int sdhci_setup_host(struct sdhci_host *host)
3474 {
3475 	struct mmc_host *mmc;
3476 	u32 max_current_caps;
3477 	unsigned int ocr_avail;
3478 	unsigned int override_timeout_clk;
3479 	u32 max_clk;
3480 	int ret;
3481 
3482 	WARN_ON(host == NULL);
3483 	if (host == NULL)
3484 		return -EINVAL;
3485 
3486 	mmc = host->mmc;
3487 
3488 	/*
3489 	 * If there are external regulators, get them. Note this must be done
3490 	 * early before resetting the host and reading the capabilities so that
3491 	 * the host can take the appropriate action if regulators are not
3492 	 * available.
3493 	 */
3494 	ret = mmc_regulator_get_supply(mmc);
3495 	if (ret)
3496 		return ret;
3497 
3498 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3499 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3500 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3501 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3502 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3503 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3504 
3505 	sdhci_read_caps(host);
3506 
3507 	override_timeout_clk = host->timeout_clk;
3508 
3509 	if (host->version > SDHCI_SPEC_300) {
3510 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3511 		       mmc_hostname(mmc), host->version);
3512 	}
3513 
3514 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3515 		host->flags |= SDHCI_USE_SDMA;
3516 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3517 		DBG("Controller doesn't have SDMA capability\n");
3518 	else
3519 		host->flags |= SDHCI_USE_SDMA;
3520 
3521 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3522 		(host->flags & SDHCI_USE_SDMA)) {
3523 		DBG("Disabling DMA as it is marked broken\n");
3524 		host->flags &= ~SDHCI_USE_SDMA;
3525 	}
3526 
3527 	if ((host->version >= SDHCI_SPEC_200) &&
3528 		(host->caps & SDHCI_CAN_DO_ADMA2))
3529 		host->flags |= SDHCI_USE_ADMA;
3530 
3531 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3532 		(host->flags & SDHCI_USE_ADMA)) {
3533 		DBG("Disabling ADMA as it is marked broken\n");
3534 		host->flags &= ~SDHCI_USE_ADMA;
3535 	}
3536 
3537 	/*
3538 	 * It is assumed that a 64-bit capable device has set a 64-bit DMA mask
3539 	 * and *must* do 64-bit DMA.  A driver has the opportunity to change
3540 	 * that during the first call to ->enable_dma().  Similarly
3541 	 * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
3542 	 * implement.
3543 	 */
3544 	if (host->caps & SDHCI_CAN_64BIT)
3545 		host->flags |= SDHCI_USE_64_BIT_DMA;
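	/*
	 * Sketch (assumption, not code from this driver): a host driver
	 * whose controller advertises SDHCI_CAN_64BIT but cannot in fact
	 * address memory above 4 GiB could override this from its
	 * ->enable_dma() callback, e.g.:
	 *
	 *	static int foo_enable_dma(struct sdhci_host *host)
	 *	{
	 *		host->flags &= ~SDHCI_USE_64_BIT_DMA;
	 *		return dma_set_mask_and_coherent(mmc_dev(host->mmc),
	 *						 DMA_BIT_MASK(32));
	 *	}
	 *
	 * foo_enable_dma() is a hypothetical name used for illustration.
	 */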

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		ret = sdhci_set_dma_mask(host);

		if (!ret && host->ops->enable_dma)
			ret = host->ops->enable_dma(host);

		if (ret) {
			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
				mmc_hostname(mmc));
			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);

			ret = 0;
		}
	}

	/* SDMA does not support 64-bit DMA */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->flags &= ~SDHCI_USE_SDMA;

	if (host->flags & SDHCI_USE_ADMA) {
		dma_addr_t dma;
		void *buf;

		/*
		 * The DMA descriptor table size is calculated as the maximum
		 * number of segments times 2, to allow for an alignment
		 * descriptor for each segment, plus 1 for a nop end descriptor,
		 * all multiplied by the descriptor size.
		 */
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_64_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
		} else {
			host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
					      SDHCI_ADMA2_32_DESC_SZ;
			host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
		}
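		/*
		 * Worked example, assuming the sdhci.h defaults of
		 * SDHCI_MAX_SEGS = 128 and 12-byte/8-byte descriptors:
		 * 64-bit: (128 * 2 + 1) * 12 = 3084 bytes;
		 * 32-bit: (128 * 2 + 1) * 8  = 2056 bytes.
		 */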

		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
		buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
					 host->adma_table_sz, &dma, GFP_KERNEL);
		if (!buf) {
			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
		} else if ((dma + host->align_buffer_sz) &
			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
				mmc_hostname(mmc));
			host->flags &= ~SDHCI_USE_ADMA;
			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
					  host->adma_table_sz, buf, dma);
		} else {
			host->align_buffer = buf;
			host->align_addr = dma;

			host->adma_table = buf + host->align_buffer_sz;
			host->adma_addr = dma + host->align_buffer_sz;
		}
	}

	/*
	 * If we use DMA, then it's up to the caller to set the DMA
	 * mask, but PIO does not need the hw shim so we set a new
	 * mask here in that case.
	 */
	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
		host->dma_mask = DMA_BIT_MASK(64);
		mmc_dev(mmc)->dma_mask = &host->dma_mask;
	}

	if (host->version >= SDHCI_SPEC_300)
		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;
	else
		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
			>> SDHCI_CLOCK_BASE_SHIFT;

	host->max_clk *= 1000000;
	if (host->max_clk == 0 || host->quirks &
			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
		if (!host->ops->get_max_clock) {
			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
			       mmc_hostname(mmc));
			ret = -ENODEV;
			goto undma;
		}
		host->max_clk = host->ops->get_max_clock(host);
	}

	/*
	 * For Host Controller v3.00, find out whether the clock
	 * multiplier is supported.
	 */
	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
			SDHCI_CLOCK_MUL_SHIFT;

	/*
	 * If the Clock Multiplier field is 0, programmable clock mode is
	 * not supported; otherwise the actual clock multiplier is one
	 * more than the value of the field in the Capabilities Register.
	 */
	if (host->clk_mul)
		host->clk_mul += 1;
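	/*
	 * Example: a Clock Multiplier field of 9 yields clk_mul = 10, so
	 * the programmable clock can run at up to 10 x the base clock.
	 */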

	/*
	 * Set host parameters.
	 */
	max_clk = host->max_clk;

	if (host->ops->get_min_clock)
		mmc->f_min = host->ops->get_min_clock(host);
	else if (host->version >= SDHCI_SPEC_300) {
		if (host->clk_mul) {
			mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
			max_clk = host->max_clk * host->clk_mul;
		} else
			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
	} else
		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;

	if (!mmc->f_max || mmc->f_max > max_clk)
		mmc->f_max = max_clk;
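	/*
	 * Worked example: with a 100 MHz base clock and clk_mul = 10,
	 * max_clk (and thus the default f_max) is 1 GHz and
	 * f_min = 1 GHz / 1024, roughly 977 kHz.
	 */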

	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
					SDHCI_TIMEOUT_CLK_SHIFT;

		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
			host->timeout_clk *= 1000;

		if (host->timeout_clk == 0) {
			if (!host->ops->get_timeout_clock) {
				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
					mmc_hostname(mmc));
				ret = -ENODEV;
				goto undma;
			}

			host->timeout_clk =
				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
					     1000);
		}

		if (override_timeout_clk)
			host->timeout_clk = override_timeout_clk;

		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
			host->ops->get_max_timeout_count(host) : 1 << 27;
		mmc->max_busy_timeout /= host->timeout_clk;
	}
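	/*
	 * Worked example, assuming timeout_clk ends up in kHz as above: a
	 * 48 MHz timeout clock gives timeout_clk = 48000 and, with the
	 * default 1 << 27 maximum count, max_busy_timeout =
	 * 134217728 / 48000, roughly 2796 ms.
	 */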

	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
	    !host->ops->get_max_timeout_count)
		mmc->max_busy_timeout = 0;

	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
		host->flags |= SDHCI_AUTO_CMD12;

	/* Auto-CMD23 stuff only works in ADMA or PIO. */
	if ((host->version >= SDHCI_SPEC_300) &&
	    ((host->flags & SDHCI_USE_ADMA) ||
	     !(host->flags & SDHCI_USE_SDMA)) &&
	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
		host->flags |= SDHCI_AUTO_CMD23;
		DBG("Auto-CMD23 available\n");
	} else {
		DBG("Auto-CMD23 unavailable\n");
	}

	/*
	 * A controller may support 8-bit width, but the board itself
	 * might not have the pins brought out.  Boards that support
	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
	 * their platform code before calling sdhci_add_host(), and we
	 * won't assume 8-bit width for hosts without that CAP.
	 */
	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
		mmc->caps &= ~MMC_CAP_CMD23;

	if (host->caps & SDHCI_CAN_DO_HISPD)
		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
	    mmc_card_is_removable(mmc) &&
	    mmc_gpio_get_cd(host->mmc) < 0)
		mmc->caps |= MMC_CAP_NEEDS_POLL;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		ret = regulator_enable(mmc->supply.vqmmc);

		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
						    1950000))
			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
					 SDHCI_SUPPORT_SDR50 |
					 SDHCI_SUPPORT_DDR50);

		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
						    3600000))
			host->flags &= ~SDHCI_SIGNALING_330;

		if (ret) {
			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
				mmc_hostname(mmc), ret);
			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
		}
	}

	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
				 SDHCI_SUPPORT_DDR50);
		/*
		 * The SDHCI controller in a SoC might support HS200/HS400
		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
		 * but if the board is modeled such that the IO lines are not
		 * connected to 1.8V then HS200/HS400 cannot be supported.
		 * Disable HS200/HS400 if the board does not have 1.8V wired
		 * to the IO lines. (The same applies to the other 1.8V modes.)
		 */
		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
	}

	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
			   SDHCI_SUPPORT_DDR50))
		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;

	/* SDR104 support also implies SDR50 support */
	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
		/*
		 * SD3.0: SDR104 is supported so (for eMMC) the caps2
		 * field can be promoted to support HS200.
		 */
		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
			mmc->caps2 |= MMC_CAP2_HS200;
	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
		mmc->caps |= MMC_CAP_UHS_SDR50;
	}

	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
	    (host->caps1 & SDHCI_SUPPORT_HS400))
		mmc->caps2 |= MMC_CAP2_HS400;

	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
	    (IS_ERR(mmc->supply.vqmmc) ||
	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
					     1300000)))
		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;

	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
		mmc->caps |= MMC_CAP_UHS_DDR50;

	/* Does the host need tuning for SDR50? */
	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
		host->flags |= SDHCI_SDR50_NEEDS_TUNING;

	/* Driver Type(s) (A, C, D) supported by the host */
	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;

	/* Initial value for re-tuning timer count */
	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;

	/*
	 * If the Re-tuning Timer is not disabled, the actual re-tuning
	 * interval will be 2 ^ (n - 1) seconds.
	 */
	if (host->tuning_count)
		host->tuning_count = 1 << (host->tuning_count - 1);
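	/*
	 * Example: a Re-tuning Timer Count field of 4 gives a re-tuning
	 * interval of 1 << (4 - 1) = 8 seconds.
	 */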

	/* Re-tuning mode supported by the Host Controller */
	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
			     SDHCI_RETUNING_MODE_SHIFT;

	ocr_avail = 0;

	/*
	 * According to SD Host Controller spec v3.00, if the Host System
	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
	 * the value is meaningful only if Voltage Support in the Capabilities
	 * register is set. The actual current value is 4 times the register
	 * value.
	 */
	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
		int curr = regulator_get_current_limit(mmc->supply.vmmc);
		if (curr > 0) {
			/* convert to SDHCI_MAX_CURRENT format */
			curr = curr / 1000;  /* convert to mA */
			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;

			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
			max_current_caps =
				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
		}
	}
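	/*
	 * Example decode: a 3.3V Max Current field of 0x32 (50) means
	 * 50 * SDHCI_MAX_CURRENT_MULTIPLIER = 50 * 4 = 200 mA. The
	 * regulator path above does the inverse: a 200 mA limit becomes
	 * a field value of 50.
	 */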

	if (host->caps & SDHCI_CAN_VDD_330) {
		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;

		mmc->max_current_330 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_330_MASK) >>
				   SDHCI_MAX_CURRENT_330_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_300) {
		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;

		mmc->max_current_300 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_300_MASK) >>
				   SDHCI_MAX_CURRENT_300_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}
	if (host->caps & SDHCI_CAN_VDD_180) {
		ocr_avail |= MMC_VDD_165_195;

		mmc->max_current_180 = ((max_current_caps &
				   SDHCI_MAX_CURRENT_180_MASK) >>
				   SDHCI_MAX_CURRENT_180_SHIFT) *
				   SDHCI_MAX_CURRENT_MULTIPLIER;
	}

	/* If OCR set by host, use it instead. */
	if (host->ocr_mask)
		ocr_avail = host->ocr_mask;

	/* If OCR set by external regulators, give it highest prio. */
	if (mmc->ocr_avail)
		ocr_avail = mmc->ocr_avail;

	mmc->ocr_avail = ocr_avail;
	mmc->ocr_avail_sdio = ocr_avail;
	if (host->ocr_avail_sdio)
		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
	mmc->ocr_avail_sd = ocr_avail;
	if (host->ocr_avail_sd)
		mmc->ocr_avail_sd &= host->ocr_avail_sd;
	else /* normal SD controllers don't support 1.8V */
		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
	mmc->ocr_avail_mmc = ocr_avail;
	if (host->ocr_avail_mmc)
		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
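	/*
	 * Precedence summary: the capability bits build the default mask,
	 * a driver-supplied host->ocr_mask replaces it, and a mask already
	 * set by external regulators (mmc->ocr_avail) wins over both; the
	 * per-bus-type host->ocr_avail_* masks then only narrow the result.
	 */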

	if (mmc->ocr_avail == 0) {
		pr_err("%s: Hardware doesn't report any supported voltages.\n",
		       mmc_hostname(mmc));
		ret = -ENODEV;
		goto unreg;
	}

	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
		host->flags |= SDHCI_SIGNALING_180;

	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
		host->flags |= SDHCI_SIGNALING_120;

	spin_lock_init(&host->lock);

	/*
	 * Maximum number of bytes in one transfer. Limited by the SDMA
	 * boundary size (512KiB). Note some tuning modes impose a 4MiB
	 * limit, but this is less anyway.
	 */
	mmc->max_req_size = 524288;

	/*
	 * Maximum number of segments. Depends on whether the hardware
	 * can do scatter/gather or not.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		mmc->max_segs = SDHCI_MAX_SEGS;
	} else if (host->flags & SDHCI_USE_SDMA) {
		mmc->max_segs = 1;
		if (swiotlb_max_segment()) {
			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
						IO_TLB_SEGSIZE;
			mmc->max_req_size = min(mmc->max_req_size,
						max_req_size);
		}
	} else { /* PIO */
		mmc->max_segs = SDHCI_MAX_SEGS;
	}
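	/*
	 * Worked example for the SDMA + swiotlb case, assuming the usual
	 * IO_TLB_SHIFT = 11 and IO_TLB_SEGSIZE = 128: the request size is
	 * capped at (1 << 11) * 128 = 262144 bytes (256 KiB), half the
	 * 512 KiB SDMA default above.
	 */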

	/*
	 * Maximum segment size. Could be one segment with the maximum number
	 * of bytes. When doing hardware scatter/gather, each entry cannot
	 * be larger than 64 KiB though.
	 */
	if (host->flags & SDHCI_USE_ADMA) {
		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
			mmc->max_seg_size = 65535;
		else
			mmc->max_seg_size = 65536;
	} else {
		mmc->max_seg_size = mmc->max_req_size;
	}

	/*
	 * Maximum block size. This varies from controller to controller and
	 * is specified in the capabilities register.
	 */
	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
		mmc->max_blk_size = 2;
	} else {
		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
				SDHCI_MAX_BLOCK_SHIFT;
		if (mmc->max_blk_size >= 3) {
			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
				mmc_hostname(mmc));
			mmc->max_blk_size = 0;
		}
	}

	mmc->max_blk_size = 512 << mmc->max_blk_size;
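	/*
	 * Example decode: a Max Block Length field of 0 means 512 bytes,
	 * 1 means 1024, and 2 means 2048 (hence the FORCE_BLK_SZ_2048
	 * quirk setting the raw value to 2 above); 3 is reserved and is
	 * treated as 512.
	 */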

	/*
	 * Maximum block count.
	 */
	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

	if (mmc->max_segs == 1) {
		/* This may alter mmc->*_blk_* parameters */
		ret = sdhci_allocate_bounce_buffer(host);
		if (ret)
			return ret;
	}

	return 0;

unreg:
	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);
undma:
	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_setup_host);

void sdhci_cleanup_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);
	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

	init_waitqueue_head(&host->buf_ready_int);

	sdhci_init(host, 0);

	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
				   IRQF_SHARED, mmc_hostname(mmc), host);
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
	}

	ret = sdhci_led_register(host);
	if (ret) {
		pr_err("%s: Failed to register LED device: %d\n",
		       mmc_hostname(mmc), ret);
		goto unirq;
	}

	mmiowb();

	ret = mmc_add_host(mmc);
	if (ret)
		goto unled;

	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
		(host->flags & SDHCI_USE_ADMA) ?
		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");

	sdhci_enable_card_detection(host);

	return 0;

unled:
	sdhci_led_unregister(host);
unirq:
	sdhci_do_reset(host, SDHCI_RESET_ALL);
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);

	return ret;
}
EXPORT_SYMBOL_GPL(__sdhci_add_host);

int sdhci_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	sdhci_cleanup_host(host);

	return ret;
}
EXPORT_SYMBOL_GPL(sdhci_add_host);
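/*
 * Usage sketch (hypothetical driver code, not from this file): a driver
 * that needs to adjust host capabilities between capability parsing and
 * registration can call the two halves of sdhci_add_host() itself:
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	host->mmc->caps |= MMC_CAP_NONREMOVABLE;  (driver-specific tweak)
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *	return ret;
 */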

void sdhci_remove_host(struct sdhci_host *host, int dead)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (dead) {
		spin_lock_irqsave(&host->lock, flags);

		host->flags |= SDHCI_DEVICE_DEAD;

		if (sdhci_has_requests(host)) {
			pr_err("%s: Controller removed during transfer!\n",
			       mmc_hostname(mmc));
			sdhci_error_out_mrqs(host, -ENOMEDIUM);
		}

		spin_unlock_irqrestore(&host->lock, flags);
	}

	sdhci_disable_card_detection(host);

	mmc_remove_host(mmc);

	sdhci_led_unregister(host);

	if (!dead)
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);

	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (host->align_buffer)
		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
				  host->adma_table_sz, host->align_buffer,
				  host->align_addr);

	host->adma_table = NULL;
	host->align_buffer = NULL;
}
EXPORT_SYMBOL_GPL(sdhci_remove_host);

void sdhci_free_host(struct sdhci_host *host)
{
	mmc_free_host(host->mmc);
}
EXPORT_SYMBOL_GPL(sdhci_free_host);

/*****************************************************************************\
 *                                                                           *
 * Driver init/exit                                                          *
 *                                                                           *
\*****************************************************************************/

static int __init sdhci_drv_init(void)
{
	pr_info(DRIVER_NAME
		": Secure Digital Host Controller Interface driver\n");
	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");

	return 0;
}

static void __exit sdhci_drv_exit(void)
{
}

module_init(sdhci_drv_init);
module_exit(sdhci_drv_exit);

module_param(debug_quirks, uint, 0444);
module_param(debug_quirks2, uint, 0444);

MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
MODULE_LICENSE("GPL");

MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");