1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4   *
5   *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6   *
7   * Thanks to the following companies for their support:
8   *
9   *     - JMicron (hardware and technical support)
10   */
11  
12  #include <linux/bitfield.h>
13  #include <linux/delay.h>
14  #include <linux/dmaengine.h>
15  #include <linux/ktime.h>
16  #include <linux/highmem.h>
17  #include <linux/io.h>
18  #include <linux/module.h>
19  #include <linux/dma-mapping.h>
20  #include <linux/slab.h>
21  #include <linux/scatterlist.h>
22  #include <linux/sizes.h>
23  #include <linux/regulator/consumer.h>
24  #include <linux/pm_runtime.h>
25  #include <linux/of.h>
26  
27  #include <linux/leds.h>
28  
29  #include <linux/mmc/mmc.h>
30  #include <linux/mmc/host.h>
31  #include <linux/mmc/card.h>
32  #include <linux/mmc/sdio.h>
33  #include <linux/mmc/slot-gpio.h>
34  
35  #include "sdhci.h"
36  
37  #define DRIVER_NAME "sdhci"
38  
39  #define DBG(f, x...) \
40  	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
41  
42  #define SDHCI_DUMP(f, x...) \
43  	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44  
45  #define MAX_TUNING_LOOP 40
46  
47  static unsigned int debug_quirks;
48  static unsigned int debug_quirks2;
49  
50  static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
51  
52  static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);
53  
54  void sdhci_dumpregs(struct sdhci_host *host)
55  {
56  	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
57  
58  	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
59  		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
60  		   sdhci_readw(host, SDHCI_HOST_VERSION));
61  	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
62  		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
63  		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
64  	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
65  		   sdhci_readl(host, SDHCI_ARGUMENT),
66  		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
67  	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
68  		   sdhci_readl(host, SDHCI_PRESENT_STATE),
69  		   sdhci_readb(host, SDHCI_HOST_CONTROL));
70  	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
71  		   sdhci_readb(host, SDHCI_POWER_CONTROL),
72  		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
73  	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
74  		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
75  		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
76  	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
77  		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
78  		   sdhci_readl(host, SDHCI_INT_STATUS));
79  	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
80  		   sdhci_readl(host, SDHCI_INT_ENABLE),
81  		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
82  	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
83  		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
84  		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
85  	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
86  		   sdhci_readl(host, SDHCI_CAPABILITIES),
87  		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
88  	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
89  		   sdhci_readw(host, SDHCI_COMMAND),
90  		   sdhci_readl(host, SDHCI_MAX_CURRENT));
91  	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
92  		   sdhci_readl(host, SDHCI_RESPONSE),
93  		   sdhci_readl(host, SDHCI_RESPONSE + 4));
94  	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
95  		   sdhci_readl(host, SDHCI_RESPONSE + 8),
96  		   sdhci_readl(host, SDHCI_RESPONSE + 12));
97  	SDHCI_DUMP("Host ctl2: 0x%08x\n",
98  		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
99  
100  	if (host->flags & SDHCI_USE_ADMA) {
101  		if (host->flags & SDHCI_USE_64_BIT_DMA) {
102  			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
103  				   sdhci_readl(host, SDHCI_ADMA_ERROR),
104  				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
105  				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
106  		} else {
107  			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
108  				   sdhci_readl(host, SDHCI_ADMA_ERROR),
109  				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
110  		}
111  	}
112  
113  	if (host->ops->dump_vendor_regs)
114  		host->ops->dump_vendor_regs(host);
115  
116  	SDHCI_DUMP("============================================\n");
117  }
118  EXPORT_SYMBOL_GPL(sdhci_dumpregs);
119  
120  /*****************************************************************************\
121   *                                                                           *
122   * Low level functions                                                       *
123   *                                                                           *
124  \*****************************************************************************/
125  
126  static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
127  {
128  	u16 ctrl2;
129  
130  	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
131  	if (ctrl2 & SDHCI_CTRL_V4_MODE)
132  		return;
133  
134  	ctrl2 |= SDHCI_CTRL_V4_MODE;
135  	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
136  }
137  
138  /*
139   * This can be called before sdhci_add_host() by a vendor's host controller
140   * driver to enable v4 mode if supported.
141   */
142  void sdhci_enable_v4_mode(struct sdhci_host *host)
143  {
144  	host->v4_mode = true;
145  	sdhci_do_enable_v4_mode(host);
146  }
147  EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
148  
149  static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
150  {
151  	return cmd->data || cmd->flags & MMC_RSP_BUSY;
152  }
153  
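/*
 * Arm the card-detect interrupt matching the current card presence
 * (card-remove when a card is present, card-insert when it is not), or
 * disable both. Skipped when card detection is broken, the card is
 * non-removable, or a GPIO handles card detect.
 */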
154  static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
155  {
156  	u32 present;
157  
158  	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
159  	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
160  		return;
161  
162  	if (enable) {
163  		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
164  				      SDHCI_CARD_PRESENT;
165  
166  		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
167  				       SDHCI_INT_CARD_INSERT;
168  	} else {
169  		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
170  	}
171  
172  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
173  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
174  }
175  
176  static void sdhci_enable_card_detection(struct sdhci_host *host)
177  {
178  	sdhci_set_card_detection(host, true);
179  }
180  
181  static void sdhci_disable_card_detection(struct sdhci_host *host)
182  {
183  	sdhci_set_card_detection(host, false);
184  }
185  
186  static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
187  {
188  	if (host->bus_on)
189  		return;
190  	host->bus_on = true;
191  	pm_runtime_get_noresume(mmc_dev(host->mmc));
192  }
193  
194  static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
195  {
196  	if (!host->bus_on)
197  		return;
198  	host->bus_on = false;
199  	pm_runtime_put_noidle(mmc_dev(host->mmc));
200  }
201  
202  void sdhci_reset(struct sdhci_host *host, u8 mask)
203  {
204  	ktime_t timeout;
205  
206  	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
207  
208  	if (mask & SDHCI_RESET_ALL) {
209  		host->clock = 0;
210  		/* Reset-all turns off SD Bus Power */
211  		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
212  			sdhci_runtime_pm_bus_off(host);
213  	}
214  
215  	/* Wait max 100 ms */
216  	timeout = ktime_add_ms(ktime_get(), 100);
217  
218  	/* hw clears the bit when it's done */
219  	while (1) {
220  		bool timedout = ktime_after(ktime_get(), timeout);
221  
222  		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
223  			break;
224  		if (timedout) {
225  			pr_err("%s: Reset 0x%x never completed.\n",
226  				mmc_hostname(host->mmc), (int)mask);
227  			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
228  			sdhci_dumpregs(host);
229  			return;
230  		}
231  		udelay(10);
232  	}
233  }
234  EXPORT_SYMBOL_GPL(sdhci_reset);
235  
236  static bool sdhci_do_reset(struct sdhci_host *host, u8 mask)
237  {
238  	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
239  		struct mmc_host *mmc = host->mmc;
240  
241  		if (!mmc->ops->get_cd(mmc))
242  			return false;
243  	}
244  
245  	host->ops->reset(host, mask);
246  
247  	return true;
248  }
249  
250  static void sdhci_reset_for_all(struct sdhci_host *host)
251  {
252  	if (sdhci_do_reset(host, SDHCI_RESET_ALL)) {
253  		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
254  			if (host->ops->enable_dma)
255  				host->ops->enable_dma(host);
256  		}
257  		/* Resetting the controller clears many settings */
258  		host->preset_enabled = false;
259  	}
260  }
261  
262  enum sdhci_reset_reason {
263  	SDHCI_RESET_FOR_INIT,
264  	SDHCI_RESET_FOR_REQUEST_ERROR,
265  	SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY,
266  	SDHCI_RESET_FOR_TUNING_ABORT,
267  	SDHCI_RESET_FOR_CARD_REMOVED,
268  	SDHCI_RESET_FOR_CQE_RECOVERY,
269  };
270  
271  static void sdhci_reset_for_reason(struct sdhci_host *host, enum sdhci_reset_reason reason)
272  {
273  	if (host->quirks2 & SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER) {
274  		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
275  		return;
276  	}
277  
278  	switch (reason) {
279  	case SDHCI_RESET_FOR_INIT:
280  		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
281  		break;
282  	case SDHCI_RESET_FOR_REQUEST_ERROR:
283  	case SDHCI_RESET_FOR_TUNING_ABORT:
284  	case SDHCI_RESET_FOR_CARD_REMOVED:
285  	case SDHCI_RESET_FOR_CQE_RECOVERY:
286  		sdhci_do_reset(host, SDHCI_RESET_CMD);
287  		sdhci_do_reset(host, SDHCI_RESET_DATA);
288  		break;
289  	case SDHCI_RESET_FOR_REQUEST_ERROR_DATA_ONLY:
290  		sdhci_do_reset(host, SDHCI_RESET_DATA);
291  		break;
292  	}
293  }
294  
295  #define sdhci_reset_for(h, r) sdhci_reset_for_reason((h), SDHCI_RESET_FOR_##r)
296  
297  static void sdhci_set_default_irqs(struct sdhci_host *host)
298  {
299  	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
300  		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
301  		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
302  		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
303  		    SDHCI_INT_RESPONSE;
304  
305  	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
306  	    host->tuning_mode == SDHCI_TUNING_MODE_3)
307  		host->ier |= SDHCI_INT_RETUNE;
308  
309  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
310  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
311  }
312  
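/*
 * Program the DMA Select bits in Host Control (SDMA vs. ADMA2) and, for
 * 64-bit DMA, either the v4-mode 64-bit addressing enable in Host Control 2
 * or the ADMA2 64-bit descriptor select.
 */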
313  static void sdhci_config_dma(struct sdhci_host *host)
314  {
315  	u8 ctrl;
316  	u16 ctrl2;
317  
318  	if (host->version < SDHCI_SPEC_200)
319  		return;
320  
321  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
322  
323  	/*
324  	 * Always adjust the DMA selection as some controllers
325  	 * (e.g. JMicron) can't do PIO properly when the selection
326  	 * is ADMA.
327  	 */
328  	ctrl &= ~SDHCI_CTRL_DMA_MASK;
329  	if (!(host->flags & SDHCI_REQ_USE_DMA))
330  		goto out;
331  
332  	/* Note if DMA Select is zero then SDMA is selected */
333  	if (host->flags & SDHCI_USE_ADMA)
334  		ctrl |= SDHCI_CTRL_ADMA32;
335  
336  	if (host->flags & SDHCI_USE_64_BIT_DMA) {
337  		/*
338  		 * If v4 mode, all supported DMA can be 64-bit addressing if
339  		 * controller supports 64-bit system address, otherwise only
340  		 * ADMA can support 64-bit addressing.
341  		 */
342  		if (host->v4_mode) {
343  			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
344  			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
345  			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
346  		} else if (host->flags & SDHCI_USE_ADMA) {
347  			/*
348  			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
349  			 * set SDHCI_CTRL_ADMA64.
350  			 */
351  			ctrl |= SDHCI_CTRL_ADMA64;
352  		}
353  	}
354  
355  out:
356  	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
357  }
358  
359  static void sdhci_init(struct sdhci_host *host, int soft)
360  {
361  	struct mmc_host *mmc = host->mmc;
362  	unsigned long flags;
363  
364  	if (soft)
365  		sdhci_reset_for(host, INIT);
366  	else
367  		sdhci_reset_for_all(host);
368  
369  	if (host->v4_mode)
370  		sdhci_do_enable_v4_mode(host);
371  
372  	spin_lock_irqsave(&host->lock, flags);
373  	sdhci_set_default_irqs(host);
374  	spin_unlock_irqrestore(&host->lock, flags);
375  
376  	host->cqe_on = false;
377  
378  	if (soft) {
379  		/* force clock reconfiguration */
380  		host->clock = 0;
381  		host->reinit_uhs = true;
382  		mmc->ops->set_ios(mmc, &mmc->ios);
383  	}
384  }
385  
386  static void sdhci_reinit(struct sdhci_host *host)
387  {
388  	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
389  
390  	sdhci_init(host, 0);
391  	sdhci_enable_card_detection(host);
392  
393  	/*
394  	 * A change to the card detect bits indicates a change in present state,
395  	 * refer sdhci_set_card_detection(). A card detect interrupt might have
396  	 * been missed while the host controller was being reset, so trigger a
397  	 * rescan to check.
398  	 */
399  	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
400  		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
401  }
402  
403  static void __sdhci_led_activate(struct sdhci_host *host)
404  {
405  	u8 ctrl;
406  
407  	if (host->quirks & SDHCI_QUIRK_NO_LED)
408  		return;
409  
410  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
411  	ctrl |= SDHCI_CTRL_LED;
412  	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
413  }
414  
415  static void __sdhci_led_deactivate(struct sdhci_host *host)
416  {
417  	u8 ctrl;
418  
419  	if (host->quirks & SDHCI_QUIRK_NO_LED)
420  		return;
421  
422  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
423  	ctrl &= ~SDHCI_CTRL_LED;
424  	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
425  }
426  
427  #if IS_REACHABLE(CONFIG_LEDS_CLASS)
428  static void sdhci_led_control(struct led_classdev *led,
429  			      enum led_brightness brightness)
430  {
431  	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
432  	unsigned long flags;
433  
434  	spin_lock_irqsave(&host->lock, flags);
435  
436  	if (host->runtime_suspended)
437  		goto out;
438  
439  	if (brightness == LED_OFF)
440  		__sdhci_led_deactivate(host);
441  	else
442  		__sdhci_led_activate(host);
443  out:
444  	spin_unlock_irqrestore(&host->lock, flags);
445  }
446  
447  static int sdhci_led_register(struct sdhci_host *host)
448  {
449  	struct mmc_host *mmc = host->mmc;
450  
451  	if (host->quirks & SDHCI_QUIRK_NO_LED)
452  		return 0;
453  
454  	snprintf(host->led_name, sizeof(host->led_name),
455  		 "%s::", mmc_hostname(mmc));
456  
457  	host->led.name = host->led_name;
458  	host->led.brightness = LED_OFF;
459  	host->led.default_trigger = mmc_hostname(mmc);
460  	host->led.brightness_set = sdhci_led_control;
461  
462  	return led_classdev_register(mmc_dev(mmc), &host->led);
463  }
464  
465  static void sdhci_led_unregister(struct sdhci_host *host)
466  {
467  	if (host->quirks & SDHCI_QUIRK_NO_LED)
468  		return;
469  
470  	led_classdev_unregister(&host->led);
471  }
472  
473  static inline void sdhci_led_activate(struct sdhci_host *host)
474  {
475  }
476  
477  static inline void sdhci_led_deactivate(struct sdhci_host *host)
478  {
479  }
480  
481  #else
482  
483  static inline int sdhci_led_register(struct sdhci_host *host)
484  {
485  	return 0;
486  }
487  
488  static inline void sdhci_led_unregister(struct sdhci_host *host)
489  {
490  }
491  
492  static inline void sdhci_led_activate(struct sdhci_host *host)
493  {
494  	__sdhci_led_activate(host);
495  }
496  
497  static inline void sdhci_led_deactivate(struct sdhci_host *host)
498  {
499  	__sdhci_led_deactivate(host);
500  }
501  
502  #endif
503  
504  static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
505  			    unsigned long timeout)
506  {
507  	if (sdhci_data_line_cmd(mrq->cmd))
508  		mod_timer(&host->data_timer, timeout);
509  	else
510  		mod_timer(&host->timer, timeout);
511  }
512  
513  static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
514  {
515  	if (sdhci_data_line_cmd(mrq->cmd))
516  		del_timer(&host->data_timer);
517  	else
518  		del_timer(&host->timer);
519  }
520  
521  static inline bool sdhci_has_requests(struct sdhci_host *host)
522  {
523  	return host->cmd || host->data_cmd;
524  }
525  
526  /*****************************************************************************\
527   *                                                                           *
528   * Core functions                                                            *
529   *                                                                           *
530  \*****************************************************************************/
531  
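/*
 * PIO read of one block: pull 32-bit words from the buffer data port and
 * scatter the bytes into the request's scatterlist via the sg miter.
 */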
532  static void sdhci_read_block_pio(struct sdhci_host *host)
533  {
534  	size_t blksize, len, chunk;
535  	u32 scratch;
536  	u8 *buf;
537  
538  	DBG("PIO reading\n");
539  
540  	blksize = host->data->blksz;
541  	chunk = 0;
542  
543  	while (blksize) {
544  		BUG_ON(!sg_miter_next(&host->sg_miter));
545  
546  		len = min(host->sg_miter.length, blksize);
547  
548  		blksize -= len;
549  		host->sg_miter.consumed = len;
550  
551  		buf = host->sg_miter.addr;
552  
553  		while (len) {
554  			if (chunk == 0) {
555  				scratch = sdhci_readl(host, SDHCI_BUFFER);
556  				chunk = 4;
557  			}
558  
559  			*buf = scratch & 0xFF;
560  
561  			buf++;
562  			scratch >>= 8;
563  			chunk--;
564  			len--;
565  		}
566  	}
567  
568  	sg_miter_stop(&host->sg_miter);
569  }
570  
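/*
 * PIO write of one block: gather bytes from the scatterlist into 32-bit
 * words and push them to the buffer data port.
 */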
571  static void sdhci_write_block_pio(struct sdhci_host *host)
572  {
573  	size_t blksize, len, chunk;
574  	u32 scratch;
575  	u8 *buf;
576  
577  	DBG("PIO writing\n");
578  
579  	blksize = host->data->blksz;
580  	chunk = 0;
581  	scratch = 0;
582  
583  	while (blksize) {
584  		BUG_ON(!sg_miter_next(&host->sg_miter));
585  
586  		len = min(host->sg_miter.length, blksize);
587  
588  		blksize -= len;
589  		host->sg_miter.consumed = len;
590  
591  		buf = host->sg_miter.addr;
592  
593  		while (len) {
594  			scratch |= (u32)*buf << (chunk * 8);
595  
596  			buf++;
597  			chunk++;
598  			len--;
599  
600  			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
601  				sdhci_writel(host, scratch, SDHCI_BUFFER);
602  				chunk = 0;
603  				scratch = 0;
604  			}
605  		}
606  	}
607  
608  	sg_miter_stop(&host->sg_miter);
609  }
610  
611  static void sdhci_transfer_pio(struct sdhci_host *host)
612  {
613  	u32 mask;
614  
615  	if (host->blocks == 0)
616  		return;
617  
618  	if (host->data->flags & MMC_DATA_READ)
619  		mask = SDHCI_DATA_AVAILABLE;
620  	else
621  		mask = SDHCI_SPACE_AVAILABLE;
622  
623  	/*
624  	 * Some controllers (JMicron JMB38x) mess up the buffer bits
625  	 * for transfers < 4 bytes. As long as it is just one block,
626  	 * we can ignore the bits.
627  	 */
628  	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
629  		(host->data->blocks == 1))
630  		mask = ~0;
631  
632  	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
633  		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
634  			udelay(100);
635  
636  		if (host->data->flags & MMC_DATA_READ)
637  			sdhci_read_block_pio(host);
638  		else
639  			sdhci_write_block_pio(host);
640  
641  		host->blocks--;
642  		if (host->blocks == 0)
643  			break;
644  	}
645  
646  	DBG("PIO transfer complete.\n");
647  }
648  
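/*
 * Map the request for DMA, either by staging it in the bounce buffer or by
 * mapping the scatterlist directly. Returns the number of mapped segments,
 * reusing a previous mapping if the request was pre-mapped.
 */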
649  static int sdhci_pre_dma_transfer(struct sdhci_host *host,
650  				  struct mmc_data *data, int cookie)
651  {
652  	int sg_count;
653  
654  	/*
655  	 * If the data buffers are already mapped, return the previous
656  	 * dma_map_sg() result.
657  	 */
658  	if (data->host_cookie == COOKIE_PRE_MAPPED)
659  		return data->sg_count;
660  
661  	/* Bounce write requests to the bounce buffer */
662  	if (host->bounce_buffer) {
663  		unsigned int length = data->blksz * data->blocks;
664  
665  		if (length > host->bounce_buffer_size) {
666  			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
667  			       mmc_hostname(host->mmc), length,
668  			       host->bounce_buffer_size);
669  			return -EIO;
670  		}
671  		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
672  			/* Copy the data to the bounce buffer */
673  			if (host->ops->copy_to_bounce_buffer) {
674  				host->ops->copy_to_bounce_buffer(host,
675  								 data, length);
676  			} else {
677  				sg_copy_to_buffer(data->sg, data->sg_len,
678  						  host->bounce_buffer, length);
679  			}
680  		}
681  		/* Switch ownership to the DMA */
682  		dma_sync_single_for_device(mmc_dev(host->mmc),
683  					   host->bounce_addr,
684  					   host->bounce_buffer_size,
685  					   mmc_get_dma_dir(data));
686  		/* Just a dummy value */
687  		sg_count = 1;
688  	} else {
689  		/* Just access the data directly from memory */
690  		sg_count = dma_map_sg(mmc_dev(host->mmc),
691  				      data->sg, data->sg_len,
692  				      mmc_get_dma_dir(data));
693  	}
694  
695  	if (sg_count == 0)
696  		return -ENOSPC;
697  
698  	data->sg_count = sg_count;
699  	data->host_cookie = cookie;
700  
701  	return sg_count;
702  }
703  
704  static char *sdhci_kmap_atomic(struct scatterlist *sg)
705  {
706  	return kmap_local_page(sg_page(sg)) + sg->offset;
707  }
708  
709  static void sdhci_kunmap_atomic(void *buffer)
710  {
711  	kunmap_local(buffer);
712  }
713  
714  void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
715  			   dma_addr_t addr, int len, unsigned int cmd)
716  {
717  	struct sdhci_adma2_64_desc *dma_desc = *desc;
718  
719  	/* 32-bit and 64-bit descriptors have these members in same position */
720  	dma_desc->cmd = cpu_to_le16(cmd);
721  	dma_desc->len = cpu_to_le16(len);
722  	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
723  
724  	if (host->flags & SDHCI_USE_64_BIT_DMA)
725  		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
726  
727  	*desc += host->desc_sz;
728  }
729  EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
730  
731  static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
732  					   void **desc, dma_addr_t addr,
733  					   int len, unsigned int cmd)
734  {
735  	if (host->ops->adma_write_desc)
736  		host->ops->adma_write_desc(host, desc, addr, len, cmd);
737  	else
738  		sdhci_adma_write_desc(host, desc, addr, len, cmd);
739  }
740  
741  static void sdhci_adma_mark_end(void *desc)
742  {
743  	struct sdhci_adma2_64_desc *dma_desc = desc;
744  
745  	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
746  	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
747  }
748  
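/*
 * Build the ADMA2 descriptor table for this request, bouncing bytes that
 * precede a 32-bit aligned address through the align buffer and splitting
 * segments that exceed the controller's per-descriptor limit.
 */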
749  static void sdhci_adma_table_pre(struct sdhci_host *host,
750  	struct mmc_data *data, int sg_count)
751  {
752  	struct scatterlist *sg;
753  	dma_addr_t addr, align_addr;
754  	void *desc, *align;
755  	char *buffer;
756  	int len, offset, i;
757  
758  	/*
759  	 * The spec does not specify endianness of descriptor table.
760  	 * We currently guess that it is LE.
761  	 */
762  
763  	host->sg_count = sg_count;
764  
765  	desc = host->adma_table;
766  	align = host->align_buffer;
767  
768  	align_addr = host->align_addr;
769  
770  	for_each_sg(data->sg, sg, host->sg_count, i) {
771  		addr = sg_dma_address(sg);
772  		len = sg_dma_len(sg);
773  
774  		/*
775  		 * The SDHCI specification states that ADMA addresses must
776  		 * be 32-bit aligned. If they aren't, then we use a bounce
777  		 * buffer for the (up to three) bytes that screw up the
778  		 * alignment.
779  		 */
780  		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
781  			 SDHCI_ADMA2_MASK;
782  		if (offset) {
783  			if (data->flags & MMC_DATA_WRITE) {
784  				buffer = sdhci_kmap_atomic(sg);
785  				memcpy(align, buffer, offset);
786  				sdhci_kunmap_atomic(buffer);
787  			}
788  
789  			/* tran, valid */
790  			__sdhci_adma_write_desc(host, &desc, align_addr,
791  						offset, ADMA2_TRAN_VALID);
792  
793  			BUG_ON(offset > 65536);
794  
795  			align += SDHCI_ADMA2_ALIGN;
796  			align_addr += SDHCI_ADMA2_ALIGN;
797  
798  			addr += offset;
799  			len -= offset;
800  		}
801  
802  		/*
803  		 * The block layer forces a minimum segment size of PAGE_SIZE,
804  		 * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
805  		 * multiple descriptors, noting that the ADMA table is sized
806  		 * for 4KiB chunks anyway, so it will be big enough.
807  		 */
808  		while (len > host->max_adma) {
809  			int n = 32 * 1024; /* 32KiB */
810  
811  			__sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
812  			addr += n;
813  			len -= n;
814  		}
815  
816  		/* tran, valid */
817  		if (len)
818  			__sdhci_adma_write_desc(host, &desc, addr, len,
819  						ADMA2_TRAN_VALID);
820  
821  		/*
822  		 * If this triggers then we have a calculation bug
823  		 * somewhere. :/
824  		 */
825  		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
826  	}
827  
828  	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
829  		/* Mark the last descriptor as the terminating descriptor */
830  		if (desc != host->adma_table) {
831  			desc -= host->desc_sz;
832  			sdhci_adma_mark_end(desc);
833  		}
834  	} else {
835  		/* Add a terminating entry - nop, end, valid */
836  		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
837  	}
838  }
839  
840  static void sdhci_adma_table_post(struct sdhci_host *host,
841  	struct mmc_data *data)
842  {
843  	struct scatterlist *sg;
844  	int i, size;
845  	void *align;
846  	char *buffer;
847  
848  	if (data->flags & MMC_DATA_READ) {
849  		bool has_unaligned = false;
850  
851  		/* Do a quick scan of the SG list for any unaligned mappings */
852  		for_each_sg(data->sg, sg, host->sg_count, i)
853  			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
854  				has_unaligned = true;
855  				break;
856  			}
857  
858  		if (has_unaligned) {
859  			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
860  					    data->sg_len, DMA_FROM_DEVICE);
861  
862  			align = host->align_buffer;
863  
864  			for_each_sg(data->sg, sg, host->sg_count, i) {
865  				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
866  					size = SDHCI_ADMA2_ALIGN -
867  					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
868  
869  					buffer = sdhci_kmap_atomic(sg);
870  					memcpy(buffer, align, size);
871  					sdhci_kunmap_atomic(buffer);
872  
873  					align += SDHCI_ADMA2_ALIGN;
874  				}
875  			}
876  		}
877  	}
878  }
879  
880  static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
881  {
882  	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
883  	if (host->flags & SDHCI_USE_64_BIT_DMA)
884  		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
885  }
886  
887  static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
888  {
889  	if (host->bounce_buffer)
890  		return host->bounce_addr;
891  	else
892  		return sg_dma_address(host->data->sg);
893  }
894  
895  static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
896  {
897  	if (host->v4_mode)
898  		sdhci_set_adma_addr(host, addr);
899  	else
900  		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
901  }
902  
903  static unsigned int sdhci_target_timeout(struct sdhci_host *host,
904  					 struct mmc_command *cmd,
905  					 struct mmc_data *data)
906  {
907  	unsigned int target_timeout;
908  
909  	/* timeout in us */
910  	if (!data) {
911  		target_timeout = cmd->busy_timeout * 1000;
912  	} else {
913  		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
914  		if (host->clock && data->timeout_clks) {
915  			unsigned long long val;
916  
917  			/*
918  			 * data->timeout_clks is in units of clock cycles.
919  			 * host->clock is in Hz.  target_timeout is in us.
920  			 * Hence, us = 1000000 * cycles / Hz.  Round up.
921  			 */
922  			val = 1000000ULL * data->timeout_clks;
923  			if (do_div(val, host->clock))
924  				target_timeout++;
925  			target_timeout += val;
926  		}
927  	}
928  
929  	return target_timeout;
930  }
931  
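/*
 * Compute a software timeout (in ns) covering the whole transfer, used when
 * the hardware data timeout is disabled because the requested value is too
 * large for the controller.
 */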
932  static void sdhci_calc_sw_timeout(struct sdhci_host *host,
933  				  struct mmc_command *cmd)
934  {
935  	struct mmc_data *data = cmd->data;
936  	struct mmc_host *mmc = host->mmc;
937  	struct mmc_ios *ios = &mmc->ios;
938  	unsigned char bus_width = 1 << ios->bus_width;
939  	unsigned int blksz;
940  	unsigned int freq;
941  	u64 target_timeout;
942  	u64 transfer_time;
943  
944  	target_timeout = sdhci_target_timeout(host, cmd, data);
945  	target_timeout *= NSEC_PER_USEC;
946  
947  	if (data) {
948  		blksz = data->blksz;
949  		freq = mmc->actual_clock ? : host->clock;
950  		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
951  		do_div(transfer_time, freq);
952  		/* multiply by '2' to account for any unknowns */
953  		transfer_time = transfer_time * 2;
954  		/* calculate timeout for the entire data */
955  		host->data_timeout = data->blocks * target_timeout +
956  				     transfer_time;
957  	} else {
958  		host->data_timeout = target_timeout;
959  	}
960  
961  	if (host->data_timeout)
962  		host->data_timeout += MMC_CMD_TRANSFER_TIME;
963  }
964  
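/*
 * Convert the target timeout into the controller's power-of-two timeout
 * counter value, flagging *too_big when the request exceeds the hardware
 * maximum.
 */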
965  static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
966  			     bool *too_big)
967  {
968  	u8 count;
969  	struct mmc_data *data;
970  	unsigned target_timeout, current_timeout;
971  
972  	*too_big = false;
973  
974  	/*
975  	 * If the host controller provides us with an incorrect timeout
976  	 * value, just skip the check and use the maximum. The hardware may take
977  	 * longer to time out, but that's much better than having a too-short
978  	 * timeout value.
979  	 */
980  	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
981  		return host->max_timeout_count;
982  
983  	/* Unspecified command, assume max */
984  	if (cmd == NULL)
985  		return host->max_timeout_count;
986  
987  	data = cmd->data;
988  	/* Unspecified timeout, assume max */
989  	if (!data && !cmd->busy_timeout)
990  		return host->max_timeout_count;
991  
992  	/* timeout in us */
993  	target_timeout = sdhci_target_timeout(host, cmd, data);
994  
995  	/*
996  	 * Figure out needed cycles.
997  	 * We do this in steps in order to fit inside a 32 bit int.
998  	 * The first step is the minimum timeout, which will have a
999  	 * minimum resolution of 6 bits:
1000  	 * (1) 2^13*1000 > 2^22,
1001  	 * (2) host->timeout_clk < 2^16
1002  	 *     =>
1003  	 *     (1) / (2) > 2^6
1004  	 */
1005  	count = 0;
1006  	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
1007  	while (current_timeout < target_timeout) {
1008  		count++;
1009  		current_timeout <<= 1;
1010  		if (count > host->max_timeout_count) {
1011  			if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
1012  				DBG("Too large timeout 0x%x requested for CMD%d!\n",
1013  				    count, cmd->opcode);
1014  			count = host->max_timeout_count;
1015  			*too_big = true;
1016  			break;
1017  		}
1018  	}
1019  
1020  	return count;
1021  }
1022  
1023  static void sdhci_set_transfer_irqs(struct sdhci_host *host)
1024  {
1025  	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
1026  	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
1027  
1028  	if (host->flags & SDHCI_REQ_USE_DMA)
1029  		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
1030  	else
1031  		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
1032  
1033  	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
1034  		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
1035  	else
1036  		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
1037  
1038  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1039  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1040  }
1041  
1042  void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
1043  {
1044  	if (enable)
1045  		host->ier |= SDHCI_INT_DATA_TIMEOUT;
1046  	else
1047  		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1048  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1049  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1050  }
1051  EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1052  
1053  void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1054  {
1055  	bool too_big = false;
1056  	u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1057  
1058  	if (too_big &&
1059  	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1060  		sdhci_calc_sw_timeout(host, cmd);
1061  		sdhci_set_data_timeout_irq(host, false);
1062  	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1063  		sdhci_set_data_timeout_irq(host, true);
1064  	}
1065  
1066  	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1067  }
1068  EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1069  
1070  static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1071  {
1072  	if (host->ops->set_timeout)
1073  		host->ops->set_timeout(host, cmd);
1074  	else
1075  		__sdhci_set_timeout(host, cmd);
1076  }
1077  
1078  static void sdhci_initialize_data(struct sdhci_host *host,
1079  				  struct mmc_data *data)
1080  {
1081  	WARN_ON(host->data);
1082  
1083  	/* Sanity checks */
1084  	BUG_ON(data->blksz * data->blocks > 524288);
1085  	BUG_ON(data->blksz > host->mmc->max_blk_size);
1086  	BUG_ON(data->blocks > 65535);
1087  
1088  	host->data = data;
1089  	host->data_early = 0;
1090  	host->data->bytes_xfered = 0;
1091  }
1092  
1093  static inline void sdhci_set_block_info(struct sdhci_host *host,
1094  					struct mmc_data *data)
1095  {
1096  	/* Set the DMA boundary value and block size */
1097  	sdhci_writew(host,
1098  		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1099  		     SDHCI_BLOCK_SIZE);
1100  	/*
1101  	 * From Version 4.10 onwards, if v4 mode is enabled, a 32-bit Block Count
1102  	 * can be used; in that case the 16-bit block count register must be 0.
1103  	 */
1104  	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1105  	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1106  		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1107  			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1108  		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1109  	} else {
1110  		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1111  	}
1112  }
1113  
1114  static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1115  {
1116  	struct mmc_data *data = cmd->data;
1117  
1118  	sdhci_initialize_data(host, data);
1119  
1120  	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1121  		struct scatterlist *sg;
1122  		unsigned int length_mask, offset_mask;
1123  		int i;
1124  
1125  		host->flags |= SDHCI_REQ_USE_DMA;
1126  
1127  		/*
1128  		 * FIXME: This doesn't account for merging when mapping the
1129  		 * scatterlist.
1130  		 *
1131  		 * The assumption here being that alignment and lengths are
1132  		 * the same after DMA mapping to device address space.
1133  		 */
1134  		length_mask = 0;
1135  		offset_mask = 0;
1136  		if (host->flags & SDHCI_USE_ADMA) {
1137  			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1138  				length_mask = 3;
1139  				/*
1140  				 * As we use up to 3 byte chunks to work
1141  				 * around alignment problems, we need to
1142  				 * check the offset as well.
1143  				 */
1144  				offset_mask = 3;
1145  			}
1146  		} else {
1147  			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1148  				length_mask = 3;
1149  			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1150  				offset_mask = 3;
1151  		}
1152  
1153  		if (unlikely(length_mask | offset_mask)) {
1154  			for_each_sg(data->sg, sg, data->sg_len, i) {
1155  				if (sg->length & length_mask) {
1156  					DBG("Reverting to PIO because of transfer size (%d)\n",
1157  					    sg->length);
1158  					host->flags &= ~SDHCI_REQ_USE_DMA;
1159  					break;
1160  				}
1161  				if (sg->offset & offset_mask) {
1162  					DBG("Reverting to PIO because of bad alignment\n");
1163  					host->flags &= ~SDHCI_REQ_USE_DMA;
1164  					break;
1165  				}
1166  			}
1167  		}
1168  	}
1169  
1170  	sdhci_config_dma(host);
1171  
1172  	if (host->flags & SDHCI_REQ_USE_DMA) {
1173  		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1174  
1175  		if (sg_cnt <= 0) {
1176  			/*
1177  			 * This only happens when someone fed
1178  			 * us an invalid request.
1179  			 */
1180  			WARN_ON(1);
1181  			host->flags &= ~SDHCI_REQ_USE_DMA;
1182  		} else if (host->flags & SDHCI_USE_ADMA) {
1183  			sdhci_adma_table_pre(host, data, sg_cnt);
1184  			sdhci_set_adma_addr(host, host->adma_addr);
1185  		} else {
1186  			WARN_ON(sg_cnt != 1);
1187  			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1188  		}
1189  	}
1190  
1191  	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1192  		int flags;
1193  
1194  		flags = SG_MITER_ATOMIC;
1195  		if (host->data->flags & MMC_DATA_READ)
1196  			flags |= SG_MITER_TO_SG;
1197  		else
1198  			flags |= SG_MITER_FROM_SG;
1199  		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1200  		host->blocks = data->blocks;
1201  	}
1202  
1203  	sdhci_set_transfer_irqs(host);
1204  
1205  	sdhci_set_block_info(host, data);
1206  }
1207  
1208  #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1209  
1210  static int sdhci_external_dma_init(struct sdhci_host *host)
1211  {
1212  	int ret = 0;
1213  	struct mmc_host *mmc = host->mmc;
1214  
1215  	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
1216  	if (IS_ERR(host->tx_chan)) {
1217  		ret = PTR_ERR(host->tx_chan);
1218  		if (ret != -EPROBE_DEFER)
1219  			pr_warn("Failed to request TX DMA channel.\n");
1220  		host->tx_chan = NULL;
1221  		return ret;
1222  	}
1223  
1224  	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
1225  	if (IS_ERR(host->rx_chan)) {
1226  		if (host->tx_chan) {
1227  			dma_release_channel(host->tx_chan);
1228  			host->tx_chan = NULL;
1229  		}
1230  
1231  		ret = PTR_ERR(host->rx_chan);
1232  		if (ret != -EPROBE_DEFER)
1233  			pr_warn("Failed to request RX DMA channel.\n");
1234  		host->rx_chan = NULL;
1235  	}
1236  
1237  	return ret;
1238  }
1239  
1240  static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1241  						   struct mmc_data *data)
1242  {
1243  	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1244  }
1245  
1246  static int sdhci_external_dma_setup(struct sdhci_host *host,
1247  				    struct mmc_command *cmd)
1248  {
1249  	int ret, i;
1250  	enum dma_transfer_direction dir;
1251  	struct dma_async_tx_descriptor *desc;
1252  	struct mmc_data *data = cmd->data;
1253  	struct dma_chan *chan;
1254  	struct dma_slave_config cfg;
1255  	dma_cookie_t cookie;
1256  	int sg_cnt;
1257  
1258  	if (!host->mapbase)
1259  		return -EINVAL;
1260  
1261  	memset(&cfg, 0, sizeof(cfg));
1262  	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1263  	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1264  	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1265  	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1266  	cfg.src_maxburst = data->blksz / 4;
1267  	cfg.dst_maxburst = data->blksz / 4;
1268  
1269  	/* Sanity check: all the SG entries must be aligned by block size. */
1270  	for (i = 0; i < data->sg_len; i++) {
1271  		if ((data->sg + i)->length % data->blksz)
1272  			return -EINVAL;
1273  	}
1274  
1275  	chan = sdhci_external_dma_channel(host, data);
1276  
1277  	ret = dmaengine_slave_config(chan, &cfg);
1278  	if (ret)
1279  		return ret;
1280  
1281  	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1282  	if (sg_cnt <= 0)
1283  		return -EINVAL;
1284  
1285  	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1286  	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1287  				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1288  	if (!desc)
1289  		return -EINVAL;
1290  
1291  	desc->callback = NULL;
1292  	desc->callback_param = NULL;
1293  
1294  	cookie = dmaengine_submit(desc);
1295  	if (dma_submit_error(cookie))
1296  		ret = cookie;
1297  
1298  	return ret;
1299  }
1300  
1301  static void sdhci_external_dma_release(struct sdhci_host *host)
1302  {
1303  	if (host->tx_chan) {
1304  		dma_release_channel(host->tx_chan);
1305  		host->tx_chan = NULL;
1306  	}
1307  
1308  	if (host->rx_chan) {
1309  		dma_release_channel(host->rx_chan);
1310  		host->rx_chan = NULL;
1311  	}
1312  
1313  	sdhci_switch_external_dma(host, false);
1314  }
1315  
1316  static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1317  					      struct mmc_command *cmd)
1318  {
1319  	struct mmc_data *data = cmd->data;
1320  
1321  	sdhci_initialize_data(host, data);
1322  
1323  	host->flags |= SDHCI_REQ_USE_DMA;
1324  	sdhci_set_transfer_irqs(host);
1325  
1326  	sdhci_set_block_info(host, data);
1327  }
1328  
1329  static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1330  					    struct mmc_command *cmd)
1331  {
1332  	if (!sdhci_external_dma_setup(host, cmd)) {
1333  		__sdhci_external_dma_prepare_data(host, cmd);
1334  	} else {
1335  		sdhci_external_dma_release(host);
1336  		pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n",
1337  		       mmc_hostname(host->mmc));
1338  		sdhci_prepare_data(host, cmd);
1339  	}
1340  }
1341  
1342  static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1343  					    struct mmc_command *cmd)
1344  {
1345  	struct dma_chan *chan;
1346  
1347  	if (!cmd->data)
1348  		return;
1349  
1350  	chan = sdhci_external_dma_channel(host, cmd->data);
1351  	if (chan)
1352  		dma_async_issue_pending(chan);
1353  }
1354  
1355  #else
1356  
1357  static inline int sdhci_external_dma_init(struct sdhci_host *host)
1358  {
1359  	return -EOPNOTSUPP;
1360  }
1361  
1362  static inline void sdhci_external_dma_release(struct sdhci_host *host)
1363  {
1364  }
1365  
1366  static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1367  						   struct mmc_command *cmd)
1368  {
1369  	/* This should never happen */
1370  	WARN_ON_ONCE(1);
1371  }
1372  
1373  static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1374  						   struct mmc_command *cmd)
1375  {
1376  }
1377  
1378  static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1379  							  struct mmc_data *data)
1380  {
1381  	return NULL;
1382  }
1383  
1384  #endif
1385  
1386  void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1387  {
1388  	host->use_external_dma = en;
1389  }
1390  EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1391  
1392  static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1393  				    struct mmc_request *mrq)
1394  {
1395  	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1396  	       !mrq->cap_cmd_during_tfr;
1397  }
1398  
1399  static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
1400  				    struct mmc_request *mrq)
1401  {
1402  	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1403  }
1404  
1405  static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
1406  				      struct mmc_request *mrq)
1407  {
1408  	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
1409  }
1410  
1411  static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1412  					 struct mmc_command *cmd,
1413  					 u16 *mode)
1414  {
1415  	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1416  			 (cmd->opcode != SD_IO_RW_EXTENDED);
1417  	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
1418  	u16 ctrl2;
1419  
1420  	/*
1421  	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
1422  	 * Select' is recommended rather than use of 'Auto CMD12
1423  	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
1424  	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
1425  	 */
1426  	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1427  	    (use_cmd12 || use_cmd23)) {
1428  		*mode |= SDHCI_TRNS_AUTO_SEL;
1429  
1430  		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1431  		if (use_cmd23)
1432  			ctrl2 |= SDHCI_CMD23_ENABLE;
1433  		else
1434  			ctrl2 &= ~SDHCI_CMD23_ENABLE;
1435  		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1436  
1437  		return;
1438  	}
1439  
1440  	/*
1441  	 * If we are sending CMD23, CMD12 never gets sent
1442  	 * on successful completion (so no Auto-CMD12).
1443  	 */
1444  	if (use_cmd12)
1445  		*mode |= SDHCI_TRNS_AUTO_CMD12;
1446  	else if (use_cmd23)
1447  		*mode |= SDHCI_TRNS_AUTO_CMD23;
1448  }
1449  
1450  static void sdhci_set_transfer_mode(struct sdhci_host *host,
1451  	struct mmc_command *cmd)
1452  {
1453  	u16 mode = 0;
1454  	struct mmc_data *data = cmd->data;
1455  
1456  	if (data == NULL) {
1457  		if (host->quirks2 &
1458  			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1459  			/* must not clear SDHCI_TRANSFER_MODE when tuning */
1460  			if (!mmc_op_tuning(cmd->opcode))
1461  				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1462  		} else {
1463  			/* clear Auto CMD settings for no data CMDs */
1464  			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1465  			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1466  				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1467  		}
1468  		return;
1469  	}
1470  
1471  	WARN_ON(!host->data);
1472  
1473  	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1474  		mode = SDHCI_TRNS_BLK_CNT_EN;
1475  
1476  	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1477  		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1478  		sdhci_auto_cmd_select(host, cmd, &mode);
1479  		if (sdhci_auto_cmd23(host, cmd->mrq))
1480  			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1481  	}
1482  
1483  	if (data->flags & MMC_DATA_READ)
1484  		mode |= SDHCI_TRNS_READ;
1485  	if (host->flags & SDHCI_REQ_USE_DMA)
1486  		mode |= SDHCI_TRNS_DMA;
1487  
1488  	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1489  }
1490  
1491  static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1492  {
1493  	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1494  		((mrq->cmd && mrq->cmd->error) ||
1495  		 (mrq->sbc && mrq->sbc->error) ||
1496  		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1497  		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1498  }
1499  
1500  static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1501  {
1502  	int i;
1503  
1504  	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1505  		if (host->mrqs_done[i] == mrq) {
1506  			WARN_ON(1);
1507  			return;
1508  		}
1509  	}
1510  
1511  	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1512  		if (!host->mrqs_done[i]) {
1513  			host->mrqs_done[i] = mrq;
1514  			break;
1515  		}
1516  	}
1517  
1518  	WARN_ON(i >= SDHCI_MAX_MRQS);
1519  }
1520  
1521  static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1522  {
1523  	if (host->cmd && host->cmd->mrq == mrq)
1524  		host->cmd = NULL;
1525  
1526  	if (host->data_cmd && host->data_cmd->mrq == mrq)
1527  		host->data_cmd = NULL;
1528  
1529  	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
1530  		host->deferred_cmd = NULL;
1531  
1532  	if (host->data && host->data->mrq == mrq)
1533  		host->data = NULL;
1534  
1535  	if (sdhci_needs_reset(host, mrq))
1536  		host->pending_reset = true;
1537  
1538  	sdhci_set_mrq_done(host, mrq);
1539  
1540  	sdhci_del_timer(host, mrq);
1541  
1542  	if (!sdhci_has_requests(host))
1543  		sdhci_led_deactivate(host);
1544  }
1545  
1546  static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1547  {
1548  	__sdhci_finish_mrq(host, mrq);
1549  
1550  	queue_work(host->complete_wq, &host->complete_work);
1551  }
1552  
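/*
 * Tear down the data phase of a request: reset the controller on error,
 * unmap ADMA buffers, account the bytes transferred, and send or defer a
 * stop command if one is needed.
 */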
1553  static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
1554  {
1555  	struct mmc_command *data_cmd = host->data_cmd;
1556  	struct mmc_data *data = host->data;
1557  
1558  	host->data = NULL;
1559  	host->data_cmd = NULL;
1560  
1561  	/*
1562  	 * The controller needs a reset of internal state machines upon error
1563  	 * conditions.
1564  	 */
1565  	if (data->error) {
1566  		if (!host->cmd || host->cmd == data_cmd)
1567  			sdhci_reset_for(host, REQUEST_ERROR);
1568  		else
1569  			sdhci_reset_for(host, REQUEST_ERROR_DATA_ONLY);
1570  	}
1571  
1572  	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1573  	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1574  		sdhci_adma_table_post(host, data);
1575  
1576  	/*
1577  	 * The specification states that the block count register must
1578  	 * be updated, but it does not specify at what point in the
1579  	 * data flow. That makes the register entirely useless to read
1580  	 * back so we have to assume that nothing made it to the card
1581  	 * in the event of an error.
1582  	 */
1583  	if (data->error)
1584  		data->bytes_xfered = 0;
1585  	else
1586  		data->bytes_xfered = data->blksz * data->blocks;
1587  
1588  	/*
1589  	 * Need to send CMD12 if -
1590  	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1591  	 * b) error in multiblock transfer
1592  	 */
1593  	if (data->stop &&
1594  	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1595  	     data->error)) {
1596  		/*
1597  		 * 'cap_cmd_during_tfr' request must not use the command line
1598  		 * after mmc_command_done() has been called. It is upper layer's
1599  		 * responsibility to send the stop command if required.
1600  		 */
1601  		if (data->mrq->cap_cmd_during_tfr) {
1602  			__sdhci_finish_mrq(host, data->mrq);
1603  		} else {
1604  			/* Avoid triggering warning in sdhci_send_command() */
1605  			host->cmd = NULL;
1606  			if (!sdhci_send_command(host, data->stop)) {
1607  				if (sw_data_timeout) {
1608  					/*
1609  					 * This is anyway a sw data timeout, so
1610  					 * give up now.
1611  					 */
1612  					data->stop->error = -EIO;
1613  					__sdhci_finish_mrq(host, data->mrq);
1614  				} else {
1615  					WARN_ON(host->deferred_cmd);
1616  					host->deferred_cmd = data->stop;
1617  				}
1618  			}
1619  		}
1620  	} else {
1621  		__sdhci_finish_mrq(host, data->mrq);
1622  	}
1623  }
1624  
1625  static void sdhci_finish_data(struct sdhci_host *host)
1626  {
1627  	__sdhci_finish_data(host, false);
1628  }
1629  
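/*
 * Program the argument, transfer mode and command registers to issue a
 * command. Returns false without sending if the command/data inhibit bits
 * show the controller is not ready to accept it.
 */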
1630  static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1631  {
1632  	int flags;
1633  	u32 mask;
1634  	unsigned long timeout;
1635  
1636  	WARN_ON(host->cmd);
1637  
1638  	/* Initially, a command has no error */
1639  	cmd->error = 0;
1640  
1641  	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1642  	    cmd->opcode == MMC_STOP_TRANSMISSION)
1643  		cmd->flags |= MMC_RSP_BUSY;
1644  
1645  	mask = SDHCI_CMD_INHIBIT;
1646  	if (sdhci_data_line_cmd(cmd))
1647  		mask |= SDHCI_DATA_INHIBIT;
1648  
1649  	/* We shouldn't wait for data inhibit for stop commands, even
1650  	   though they might use busy signaling */
1651  	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1652  		mask &= ~SDHCI_DATA_INHIBIT;
1653  
1654  	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
1655  		return false;
1656  
1657  	host->cmd = cmd;
1658  	host->data_timeout = 0;
1659  	if (sdhci_data_line_cmd(cmd)) {
1660  		WARN_ON(host->data_cmd);
1661  		host->data_cmd = cmd;
1662  		sdhci_set_timeout(host, cmd);
1663  	}
1664  
1665  	if (cmd->data) {
1666  		if (host->use_external_dma)
1667  			sdhci_external_dma_prepare_data(host, cmd);
1668  		else
1669  			sdhci_prepare_data(host, cmd);
1670  	}
1671  
1672  	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1673  
1674  	sdhci_set_transfer_mode(host, cmd);
1675  
1676  	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1677  		WARN_ONCE(1, "Unsupported response type!\n");
1678  		/*
1679  		 * This does not happen in practice because 136-bit response
1680  		 * commands never have busy waiting, so rather than complicate
1681  		 * the error path, just remove busy waiting and continue.
1682  		 */
1683  		cmd->flags &= ~MMC_RSP_BUSY;
1684  	}
1685  
1686  	if (!(cmd->flags & MMC_RSP_PRESENT))
1687  		flags = SDHCI_CMD_RESP_NONE;
1688  	else if (cmd->flags & MMC_RSP_136)
1689  		flags = SDHCI_CMD_RESP_LONG;
1690  	else if (cmd->flags & MMC_RSP_BUSY)
1691  		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1692  	else
1693  		flags = SDHCI_CMD_RESP_SHORT;
1694  
1695  	if (cmd->flags & MMC_RSP_CRC)
1696  		flags |= SDHCI_CMD_CRC;
1697  	if (cmd->flags & MMC_RSP_OPCODE)
1698  		flags |= SDHCI_CMD_INDEX;
1699  
1700  	/* CMD19 is special in that the Data Present Select should be set */
1701  	if (cmd->data || mmc_op_tuning(cmd->opcode))
1702  		flags |= SDHCI_CMD_DATA;
1703  
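	/*
	 * Arm the software timeout timer: use the calculated data timeout
	 * when one is known; otherwise, for a non-data command with a busy
	 * timeout longer than ~9 s, scale that busy timeout (in ms) to
	 * jiffies plus one extra second of slack; otherwise fall back to 10 s.
	 */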
1704  	timeout = jiffies;
1705  	if (host->data_timeout)
1706  		timeout += nsecs_to_jiffies(host->data_timeout);
1707  	else if (!cmd->data && cmd->busy_timeout > 9000)
1708  		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1709  	else
1710  		timeout += 10 * HZ;
1711  	sdhci_mod_timer(host, cmd->mrq, timeout);
1712  
1713  	if (host->use_external_dma)
1714  		sdhci_external_dma_pre_transfer(host, cmd);
1715  
1716  	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1717  
1718  	return true;
1719  }
1720  
1721  static bool sdhci_present_error(struct sdhci_host *host,
1722  				struct mmc_command *cmd, bool present)
1723  {
1724  	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
1725  		cmd->error = -ENOMEDIUM;
1726  		return true;
1727  	}
1728  
1729  	return false;
1730  }
1731  
1732  static bool sdhci_send_command_retry(struct sdhci_host *host,
1733  				     struct mmc_command *cmd,
1734  				     unsigned long flags)
1735  	__releases(host->lock)
1736  	__acquires(host->lock)
1737  {
1738  	struct mmc_command *deferred_cmd = host->deferred_cmd;
1739  	int timeout = 10; /* Approx. 10 ms */
1740  	bool present;
1741  
1742  	while (!sdhci_send_command(host, cmd)) {
1743  		if (!timeout--) {
1744  			pr_err("%s: Controller never released inhibit bit(s).\n",
1745  			       mmc_hostname(host->mmc));
1746  			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1747  			sdhci_dumpregs(host);
1748  			cmd->error = -EIO;
1749  			return false;
1750  		}
1751  
1752  		spin_unlock_irqrestore(&host->lock, flags);
1753  
1754  		usleep_range(1000, 1250);
1755  
1756  		present = host->mmc->ops->get_cd(host->mmc);
1757  
1758  		spin_lock_irqsave(&host->lock, flags);
1759  
1760  		/* A deferred command might disappear, handle that */
1761  		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
1762  			return true;
1763  
1764  		if (sdhci_present_error(host, cmd, present))
1765  			return false;
1766  	}
1767  
1768  	if (cmd == host->deferred_cmd)
1769  		host->deferred_cmd = NULL;
1770  
1771  	return true;
1772  }
1773  
1774  static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1775  {
1776  	int i, reg;
1777  
1778  	for (i = 0; i < 4; i++) {
1779  		reg = SDHCI_RESPONSE + (3 - i) * 4;
1780  		cmd->resp[i] = sdhci_readl(host, reg);
1781  	}
1782  
1783  	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1784  		return;
1785  
1786  	/* CRC is stripped so we need to do some shifting */
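	/*
	 * Shift each word up by one byte and pull in the top byte of the
	 * following word so that resp[0..3] end up in the layout the MMC
	 * core expects, with the stripped CRC byte left as zero at the bottom.
	 */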
1787  	for (i = 0; i < 4; i++) {
1788  		cmd->resp[i] <<= 8;
1789  		if (i != 3)
1790  			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1791  	}
1792  }
1793  
1794  static void sdhci_finish_command(struct sdhci_host *host)
1795  {
1796  	struct mmc_command *cmd = host->cmd;
1797  
1798  	host->cmd = NULL;
1799  
1800  	if (cmd->flags & MMC_RSP_PRESENT) {
1801  		if (cmd->flags & MMC_RSP_136) {
1802  			sdhci_read_rsp_136(host, cmd);
1803  		} else {
1804  			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1805  		}
1806  	}
1807  
1808  	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1809  		mmc_command_done(host->mmc, cmd->mrq);
1810  
1811  	/*
1812  	 * The host can send an interrupt when the busy state has
1813  	 * ended, allowing us to wait without wasting CPU cycles.
1814  	 * The busy signal uses DAT0 so this is similar to waiting
1815  	 * for data to complete.
1816  	 *
1817  	 * Note: The 1.0 specification is a bit ambiguous about this
1818  	 *       feature so there might be some problems with older
1819  	 *       controllers.
1820  	 */
1821  	if (cmd->flags & MMC_RSP_BUSY) {
1822  		if (cmd->data) {
1823  			DBG("Cannot wait for busy signal when also doing a data transfer");
1824  		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1825  			   cmd == host->data_cmd) {
1826  			/* Command complete before busy is ended */
1827  			return;
1828  		}
1829  	}
1830  
1831  	/* Finished CMD23, now send actual command. */
1832  	if (cmd == cmd->mrq->sbc) {
1833  		if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1834  			WARN_ON(host->deferred_cmd);
1835  			host->deferred_cmd = cmd->mrq->cmd;
1836  		}
1837  	} else {
1838  
1839  		/* Processed actual command. */
1840  		if (host->data && host->data_early)
1841  			sdhci_finish_data(host);
1842  
1843  		if (!cmd->data)
1844  			__sdhci_finish_mrq(host, cmd->mrq);
1845  	}
1846  }
1847  
1848  static u16 sdhci_get_preset_value(struct sdhci_host *host)
1849  {
1850  	u16 preset = 0;
1851  
1852  	switch (host->timing) {
1853  	case MMC_TIMING_MMC_HS:
1854  	case MMC_TIMING_SD_HS:
1855  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HIGH_SPEED);
1856  		break;
1857  	case MMC_TIMING_UHS_SDR12:
1858  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1859  		break;
1860  	case MMC_TIMING_UHS_SDR25:
1861  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1862  		break;
1863  	case MMC_TIMING_UHS_SDR50:
1864  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1865  		break;
1866  	case MMC_TIMING_UHS_SDR104:
1867  	case MMC_TIMING_MMC_HS200:
1868  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1869  		break;
1870  	case MMC_TIMING_UHS_DDR50:
1871  	case MMC_TIMING_MMC_DDR52:
1872  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1873  		break;
1874  	case MMC_TIMING_MMC_HS400:
1875  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1876  		break;
1877  	default:
1878  		pr_warn("%s: Invalid UHS-I mode selected\n",
1879  			mmc_hostname(host->mmc));
1880  		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1881  		break;
1882  	}
1883  	return preset;
1884  }
1885  
1886  u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1887  		   unsigned int *actual_clock)
1888  {
1889  	int div = 0; /* Initialized for compiler warning */
1890  	int real_div = div, clk_mul = 1;
1891  	u16 clk = 0;
1892  	bool switch_base_clk = false;
1893  
1894  	if (host->version >= SDHCI_SPEC_300) {
1895  		if (host->preset_enabled) {
1896  			u16 pre_val;
1897  
1898  			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1899  			pre_val = sdhci_get_preset_value(host);
1900  			div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1901  			if (host->clk_mul &&
1902  				(pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1903  				clk = SDHCI_PROG_CLOCK_MODE;
1904  				real_div = div + 1;
1905  				clk_mul = host->clk_mul;
1906  			} else {
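				/*
				 * Divided clock mode: the preset field holds N
				 * where SDCLK = base clock / (2 * N), so the
				 * effective divisor is 2 * N, or 1 when N is 0
				 * (undivided base clock).
				 */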
1907  				real_div = max_t(int, 1, div << 1);
1908  			}
1909  			goto clock_set;
1910  		}
1911  
1912  		/*
1913  		 * Check if the Host Controller supports Programmable Clock
1914  		 * Mode.
1915  		 */
1916  		if (host->clk_mul) {
1917  			for (div = 1; div <= 1024; div++) {
1918  				if ((host->max_clk * host->clk_mul / div)
1919  					<= clock)
1920  					break;
1921  			}
1922  			if ((host->max_clk * host->clk_mul / div) <= clock) {
1923  				/*
1924  				 * Set Programmable Clock Mode in the Clock
1925  				 * Control register.
1926  				 */
1927  				clk = SDHCI_PROG_CLOCK_MODE;
1928  				real_div = div;
1929  				clk_mul = host->clk_mul;
1930  				div--;
1931  			} else {
1932  				/*
1933  				 * Divisor can be too small to reach clock
1934  				 * speed requirement. Then use the base clock.
1935  				 */
1936  				switch_base_clk = true;
1937  			}
1938  		}
1939  
1940  		if (!host->clk_mul || switch_base_clk) {
1941  			/* Version 3.00 divisors must be a multiple of 2. */
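			/*
			 * For example (illustrative numbers only): with a
			 * 200 MHz base clock and a 50 MHz request, the loop
			 * stops at div = 4, so real_div = 4 and the register
			 * value written is div >> 1 = 2, giving an actual
			 * clock of 200 MHz / 4 = 50 MHz.
			 */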
1942  			if (host->max_clk <= clock)
1943  				div = 1;
1944  			else {
1945  				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1946  				     div += 2) {
1947  					if ((host->max_clk / div) <= clock)
1948  						break;
1949  				}
1950  			}
1951  			real_div = div;
1952  			div >>= 1;
1953  			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1954  				&& !div && host->max_clk <= 25000000)
1955  				div = 1;
1956  		}
1957  	} else {
1958  		/* Version 2.00 divisors must be a power of 2. */
1959  		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1960  			if ((host->max_clk / div) <= clock)
1961  				break;
1962  		}
1963  		real_div = div;
1964  		div >>= 1;
1965  	}
1966  
1967  clock_set:
1968  	if (real_div)
1969  		*actual_clock = (host->max_clk * clk_mul) / real_div;
1970  	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1971  	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1972  		<< SDHCI_DIVIDER_HI_SHIFT;
1973  
1974  	return clk;
1975  }
1976  EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1977  
1978  void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1979  {
1980  	ktime_t timeout;
1981  
1982  	clk |= SDHCI_CLOCK_INT_EN;
1983  	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1984  
1985  	/* Wait max 150 ms */
1986  	timeout = ktime_add_ms(ktime_get(), 150);
1987  	while (1) {
1988  		bool timedout = ktime_after(ktime_get(), timeout);
1989  
1990  		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1991  		if (clk & SDHCI_CLOCK_INT_STABLE)
1992  			break;
1993  		if (timedout) {
1994  			pr_err("%s: Internal clock never stabilised.\n",
1995  			       mmc_hostname(host->mmc));
1996  			sdhci_err_stats_inc(host, CTRL_TIMEOUT);
1997  			sdhci_dumpregs(host);
1998  			return;
1999  		}
2000  		udelay(10);
2001  	}
2002  
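	/*
	 * SDHCI v4.10 hosts running in v4 mode have a separate PLL Enable
	 * bit: turn it on and wait again for the internal clock to report
	 * stable before enabling the card clock.
	 */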
2003  	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
2004  		clk |= SDHCI_CLOCK_PLL_EN;
2005  		clk &= ~SDHCI_CLOCK_INT_STABLE;
2006  		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2007  
2008  		/* Wait max 150 ms */
2009  		timeout = ktime_add_ms(ktime_get(), 150);
2010  		while (1) {
2011  			bool timedout = ktime_after(ktime_get(), timeout);
2012  
2013  			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2014  			if (clk & SDHCI_CLOCK_INT_STABLE)
2015  				break;
2016  			if (timedout) {
2017  				pr_err("%s: PLL clock never stabilised.\n",
2018  				       mmc_hostname(host->mmc));
2019  				sdhci_err_stats_inc(host, CTRL_TIMEOUT);
2020  				sdhci_dumpregs(host);
2021  				return;
2022  			}
2023  			udelay(10);
2024  		}
2025  	}
2026  
2027  	clk |= SDHCI_CLOCK_CARD_EN;
2028  	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2029  }
2030  EXPORT_SYMBOL_GPL(sdhci_enable_clk);
2031  
2032  void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
2033  {
2034  	u16 clk;
2035  
2036  	host->mmc->actual_clock = 0;
2037  
2038  	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
2039  
2040  	if (clock == 0)
2041  		return;
2042  
2043  	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2044  	sdhci_enable_clk(host, clk);
2045  }
2046  EXPORT_SYMBOL_GPL(sdhci_set_clock);
2047  
2048  static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2049  				unsigned short vdd)
2050  {
2051  	struct mmc_host *mmc = host->mmc;
2052  
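	/*
	 * An external regulator supplies VDD, so only the bus-power on/off
	 * bit is written to the power register here; no voltage bits are set.
	 */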
2053  	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2054  
2055  	if (mode != MMC_POWER_OFF)
2056  		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2057  	else
2058  		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2059  }
2060  
2061  void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2062  			   unsigned short vdd)
2063  {
2064  	u8 pwr = 0;
2065  
2066  	if (mode != MMC_POWER_OFF) {
2067  		switch (1 << vdd) {
2068  		case MMC_VDD_165_195:
2069  		/*
2070  		 * Without a regulator, SDHCI does not support 2.0v
2071  		 * so we only get here if the driver deliberately
2072  		 * added the 2.0v range to ocr_avail. Map it to 1.8v
2073  		 * for the purpose of turning on the power.
2074  		 */
2075  		case MMC_VDD_20_21:
2076  			pwr = SDHCI_POWER_180;
2077  			break;
2078  		case MMC_VDD_29_30:
2079  		case MMC_VDD_30_31:
2080  			pwr = SDHCI_POWER_300;
2081  			break;
2082  		case MMC_VDD_32_33:
2083  		case MMC_VDD_33_34:
2084  		/*
2085  		 * 3.4 ~ 3.6V are valid only for those platforms where it's
2086  		 * known that the voltage range is supported by hardware.
2087  		 */
2088  		case MMC_VDD_34_35:
2089  		case MMC_VDD_35_36:
2090  			pwr = SDHCI_POWER_330;
2091  			break;
2092  		default:
2093  			WARN(1, "%s: Invalid vdd %#x\n",
2094  			     mmc_hostname(host->mmc), vdd);
2095  			break;
2096  		}
2097  	}
2098  
2099  	if (host->pwr == pwr)
2100  		return;
2101  
2102  	host->pwr = pwr;
2103  
2104  	if (pwr == 0) {
2105  		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2106  		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2107  			sdhci_runtime_pm_bus_off(host);
2108  	} else {
2109  		/*
2110  		 * Spec says that we should clear the power reg before setting
2111  		 * a new value. Some controllers don't seem to like this though.
2112  		 */
2113  		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2114  			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2115  
2116  		/*
2117  		 * At least the Marvell CaFe chip gets confused if we set the
2118  		 * voltage and set turn on power at the same time, so set the
2119  		 * voltage first.
2120  		 */
2121  		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2122  			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2123  
2124  		pwr |= SDHCI_POWER_ON;
2125  
2126  		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2127  
2128  		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2129  			sdhci_runtime_pm_bus_on(host);
2130  
2131  		/*
2132  		 * Some controllers need an extra delay of 10ms before they
2133  		 * can apply the clock after applying power.
2134  		 */
2135  		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2136  			mdelay(10);
2137  	}
2138  }
2139  EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2140  
2141  void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2142  		     unsigned short vdd)
2143  {
2144  	if (IS_ERR(host->mmc->supply.vmmc))
2145  		sdhci_set_power_noreg(host, mode, vdd);
2146  	else
2147  		sdhci_set_power_reg(host, mode, vdd);
2148  }
2149  EXPORT_SYMBOL_GPL(sdhci_set_power);
2150  
2151  /*
2152   * Some controllers need to configure a valid bus voltage on their power
2153   * register regardless of whether an external regulator is taking care of power
2154   * supply. This helper function takes care of it if set as the controller's
2155   * sdhci_ops.set_power callback.
2156   */
2157  void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2158  				     unsigned char mode,
2159  				     unsigned short vdd)
2160  {
2161  	if (!IS_ERR(host->mmc->supply.vmmc)) {
2162  		struct mmc_host *mmc = host->mmc;
2163  
2164  		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2165  	}
2166  	sdhci_set_power_noreg(host, mode, vdd);
2167  }
2168  EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
2169  
2170  /*****************************************************************************\
2171   *                                                                           *
2172   * MMC callbacks                                                             *
2173   *                                                                           *
2174  \*****************************************************************************/
2175  
2176  void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2177  {
2178  	struct sdhci_host *host = mmc_priv(mmc);
2179  	struct mmc_command *cmd;
2180  	unsigned long flags;
2181  	bool present;
2182  
2183  	/* Firstly check card presence */
2184  	present = mmc->ops->get_cd(mmc);
2185  
2186  	spin_lock_irqsave(&host->lock, flags);
2187  
2188  	sdhci_led_activate(host);
2189  
2190  	if (sdhci_present_error(host, mrq->cmd, present))
2191  		goto out_finish;
2192  
2193  	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2194  
2195  	if (!sdhci_send_command_retry(host, cmd, flags))
2196  		goto out_finish;
2197  
2198  	spin_unlock_irqrestore(&host->lock, flags);
2199  
2200  	return;
2201  
2202  out_finish:
2203  	sdhci_finish_mrq(host, mrq);
2204  	spin_unlock_irqrestore(&host->lock, flags);
2205  }
2206  EXPORT_SYMBOL_GPL(sdhci_request);
2207  
2208  int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2209  {
2210  	struct sdhci_host *host = mmc_priv(mmc);
2211  	struct mmc_command *cmd;
2212  	unsigned long flags;
2213  	int ret = 0;
2214  
2215  	spin_lock_irqsave(&host->lock, flags);
2216  
2217  	if (sdhci_present_error(host, mrq->cmd, true)) {
2218  		sdhci_finish_mrq(host, mrq);
2219  		goto out_finish;
2220  	}
2221  
2222  	cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2223  
2224  	/*
2225  	 * The HSQ may send a command in interrupt context without polling
2226  	 * the busy signaling, so return -EBUSY if the controller has not
2227  	 * released the inhibit bits. HSQ will then retry the request in
2228  	 * non-atomic context, which is why we must not finish this request
2229  	 * here.
2230  	 */
2231  	if (!sdhci_send_command(host, cmd))
2232  		ret = -EBUSY;
2233  	else
2234  		sdhci_led_activate(host);
2235  
2236  out_finish:
2237  	spin_unlock_irqrestore(&host->lock, flags);
2238  	return ret;
2239  }
2240  EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2241  
2242  void sdhci_set_bus_width(struct sdhci_host *host, int width)
2243  {
2244  	u8 ctrl;
2245  
2246  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2247  	if (width == MMC_BUS_WIDTH_8) {
2248  		ctrl &= ~SDHCI_CTRL_4BITBUS;
2249  		ctrl |= SDHCI_CTRL_8BITBUS;
2250  	} else {
2251  		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2252  			ctrl &= ~SDHCI_CTRL_8BITBUS;
2253  		if (width == MMC_BUS_WIDTH_4)
2254  			ctrl |= SDHCI_CTRL_4BITBUS;
2255  		else
2256  			ctrl &= ~SDHCI_CTRL_4BITBUS;
2257  	}
2258  	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2259  }
2260  EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2261  
2262  void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2263  {
2264  	u16 ctrl_2;
2265  
2266  	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2267  	/* Select Bus Speed Mode for host */
2268  	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2269  	if ((timing == MMC_TIMING_MMC_HS200) ||
2270  	    (timing == MMC_TIMING_UHS_SDR104))
2271  		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2272  	else if (timing == MMC_TIMING_UHS_SDR12)
2273  		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2274  	else if (timing == MMC_TIMING_UHS_SDR25)
2275  		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2276  	else if (timing == MMC_TIMING_UHS_SDR50)
2277  		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2278  	else if ((timing == MMC_TIMING_UHS_DDR50) ||
2279  		 (timing == MMC_TIMING_MMC_DDR52))
2280  		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2281  	else if (timing == MMC_TIMING_MMC_HS400)
2282  		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2283  	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2284  }
2285  EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2286  
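/*
 * The UHS-I bus-speed modes (plus eMMC DDR52, which shares the DDR50 preset)
 * for which the driver makes use of the controller's Preset Value registers.
 */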
2287  static bool sdhci_timing_has_preset(unsigned char timing)
2288  {
2289  	switch (timing) {
2290  	case MMC_TIMING_UHS_SDR12:
2291  	case MMC_TIMING_UHS_SDR25:
2292  	case MMC_TIMING_UHS_SDR50:
2293  	case MMC_TIMING_UHS_SDR104:
2294  	case MMC_TIMING_UHS_DDR50:
2295  	case MMC_TIMING_MMC_DDR52:
2296  		return true;
2297  	}
2298  	return false;
2299  }
2300  
2301  static bool sdhci_preset_needed(struct sdhci_host *host, unsigned char timing)
2302  {
2303  	return !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2304  	       sdhci_timing_has_preset(timing);
2305  }
2306  
2307  static bool sdhci_presetable_values_change(struct sdhci_host *host, struct mmc_ios *ios)
2308  {
2309  	/*
2310  	 * Preset Values are: Driver Strength, Clock Generator and SDCLK/RCLK
2311  	 * Frequency. Check if preset values need to be enabled, or the Driver
2312  	 * Strength needs updating. Note, clock changes are handled separately.
2313  	 */
2314  	return !host->preset_enabled &&
2315  	       (sdhci_preset_needed(host, ios->timing) || host->drv_type != ios->drv_type);
2316  }
2317  
2318  void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2319  {
2320  	struct sdhci_host *host = mmc_priv(mmc);
2321  	bool reinit_uhs = host->reinit_uhs;
2322  	bool turning_on_clk = false;
2323  	u8 ctrl;
2324  
2325  	host->reinit_uhs = false;
2326  
2327  	if (ios->power_mode == MMC_POWER_UNDEFINED)
2328  		return;
2329  
2330  	if (host->flags & SDHCI_DEVICE_DEAD) {
2331  		if (!IS_ERR(mmc->supply.vmmc) &&
2332  		    ios->power_mode == MMC_POWER_OFF)
2333  			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2334  		return;
2335  	}
2336  
2337  	/*
2338  	 * Reset the chip on each power off.
2339  	 * Should clear out any weird states.
2340  	 */
2341  	if (ios->power_mode == MMC_POWER_OFF) {
2342  		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2343  		sdhci_reinit(host);
2344  	}
2345  
2346  	if (host->version >= SDHCI_SPEC_300 &&
2347  		(ios->power_mode == MMC_POWER_UP) &&
2348  		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2349  		sdhci_enable_preset_value(host, false);
2350  
2351  	if (!ios->clock || ios->clock != host->clock) {
2352  		turning_on_clk = ios->clock && !host->clock;
2353  
2354  		host->ops->set_clock(host, ios->clock);
2355  		host->clock = ios->clock;
2356  
2357  		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2358  		    host->clock) {
2359  			host->timeout_clk = mmc->actual_clock ?
2360  						mmc->actual_clock / 1000 :
2361  						host->clock / 1000;
2362  			mmc->max_busy_timeout =
2363  				host->ops->get_max_timeout_count ?
2364  				host->ops->get_max_timeout_count(host) :
2365  				1 << 27;
2366  			mmc->max_busy_timeout /= host->timeout_clk;
2367  		}
2368  	}
2369  
2370  	if (host->ops->set_power)
2371  		host->ops->set_power(host, ios->power_mode, ios->vdd);
2372  	else
2373  		sdhci_set_power(host, ios->power_mode, ios->vdd);
2374  
2375  	if (host->ops->platform_send_init_74_clocks)
2376  		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2377  
2378  	host->ops->set_bus_width(host, ios->bus_width);
2379  
2380  	/*
2381  	 * Special case to avoid multiple clock changes during voltage
2382  	 * switching.
2383  	 */
2384  	if (!reinit_uhs &&
2385  	    turning_on_clk &&
2386  	    host->timing == ios->timing &&
2387  	    host->version >= SDHCI_SPEC_300 &&
2388  	    !sdhci_presetable_values_change(host, ios))
2389  		return;
2390  
2391  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2392  
2393  	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2394  		if (ios->timing == MMC_TIMING_SD_HS ||
2395  		     ios->timing == MMC_TIMING_MMC_HS ||
2396  		     ios->timing == MMC_TIMING_MMC_HS400 ||
2397  		     ios->timing == MMC_TIMING_MMC_HS200 ||
2398  		     ios->timing == MMC_TIMING_MMC_DDR52 ||
2399  		     ios->timing == MMC_TIMING_UHS_SDR50 ||
2400  		     ios->timing == MMC_TIMING_UHS_SDR104 ||
2401  		     ios->timing == MMC_TIMING_UHS_DDR50 ||
2402  		     ios->timing == MMC_TIMING_UHS_SDR25)
2403  			ctrl |= SDHCI_CTRL_HISPD;
2404  		else
2405  			ctrl &= ~SDHCI_CTRL_HISPD;
2406  	}
2407  
2408  	if (host->version >= SDHCI_SPEC_300) {
2409  		u16 clk, ctrl_2;
2410  
2411  		/*
2412  		 * According to SDHCI Spec v3.00, if the Preset Value
2413  		 * Enable in the Host Control 2 register is set, we
2414  		 * need to reset SD Clock Enable before changing High
2415  		 * Speed Enable to avoid generating clock glitches.
2416  		 */
2417  		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2418  		if (clk & SDHCI_CLOCK_CARD_EN) {
2419  			clk &= ~SDHCI_CLOCK_CARD_EN;
2420  			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2421  		}
2422  
2423  		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2424  
2425  		if (!host->preset_enabled) {
2426  			/*
2427  			 * We only need to set Driver Strength if the
2428  			 * preset value enable is not set.
2429  			 */
2430  			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2431  			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2432  			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2433  				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2434  			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2435  				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2436  			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2437  				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2438  			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2439  				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2440  			else {
2441  				pr_warn("%s: invalid driver type, default to driver type B\n",
2442  					mmc_hostname(mmc));
2443  				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2444  			}
2445  
2446  			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2447  			host->drv_type = ios->drv_type;
2448  		}
2449  
2450  		host->ops->set_uhs_signaling(host, ios->timing);
2451  		host->timing = ios->timing;
2452  
2453  		if (sdhci_preset_needed(host, ios->timing)) {
2454  			u16 preset;
2455  
2456  			sdhci_enable_preset_value(host, true);
2457  			preset = sdhci_get_preset_value(host);
2458  			ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2459  						  preset);
2460  			host->drv_type = ios->drv_type;
2461  		}
2462  
2463  		/* Re-enable SD Clock */
2464  		host->ops->set_clock(host, host->clock);
2465  	} else
2466  		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2467  }
2468  EXPORT_SYMBOL_GPL(sdhci_set_ios);
2469  
2470  static int sdhci_get_cd(struct mmc_host *mmc)
2471  {
2472  	struct sdhci_host *host = mmc_priv(mmc);
2473  	int gpio_cd = mmc_gpio_get_cd(mmc);
2474  
2475  	if (host->flags & SDHCI_DEVICE_DEAD)
2476  		return 0;
2477  
2478  	/* If nonremovable, assume that the card is always present. */
2479  	if (!mmc_card_is_removable(mmc))
2480  		return 1;
2481  
2482  	/*
2483  	 * Try slot GPIO detect; if defined, it takes precedence
2484  	 * over the built-in controller functionality.
2485  	 */
2486  	if (gpio_cd >= 0)
2487  		return !!gpio_cd;
2488  
2489  	/* If polling, assume that the card is always present. */
2490  	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2491  		return 1;
2492  
2493  	/* Host native card detect */
2494  	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2495  }
2496  
2497  int sdhci_get_cd_nogpio(struct mmc_host *mmc)
2498  {
2499  	struct sdhci_host *host = mmc_priv(mmc);
2500  	unsigned long flags;
2501  	int ret = 0;
2502  
2503  	spin_lock_irqsave(&host->lock, flags);
2504  
2505  	if (host->flags & SDHCI_DEVICE_DEAD)
2506  		goto out;
2507  
2508  	ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2509  out:
2510  	spin_unlock_irqrestore(&host->lock, flags);
2511  
2512  	return ret;
2513  }
2514  EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
2515  
2516  static int sdhci_check_ro(struct sdhci_host *host)
2517  {
2518  	unsigned long flags;
2519  	int is_readonly;
2520  
2521  	spin_lock_irqsave(&host->lock, flags);
2522  
2523  	if (host->flags & SDHCI_DEVICE_DEAD)
2524  		is_readonly = 0;
2525  	else if (host->ops->get_ro)
2526  		is_readonly = host->ops->get_ro(host);
2527  	else if (mmc_can_gpio_ro(host->mmc))
2528  		is_readonly = mmc_gpio_get_ro(host->mmc);
2529  	else
2530  		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2531  				& SDHCI_WRITE_PROTECT);
2532  
2533  	spin_unlock_irqrestore(&host->lock, flags);
2534  
2535  	/* This quirk needs to be replaced by a callback function later */
2536  	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2537  		!is_readonly : is_readonly;
2538  }
2539  
2540  #define SAMPLE_COUNT	5
2541  
2542  static int sdhci_get_ro(struct mmc_host *mmc)
2543  {
2544  	struct sdhci_host *host = mmc_priv(mmc);
2545  	int i, ro_count;
2546  
2547  	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2548  		return sdhci_check_ro(host);
2549  
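	/*
	 * Unstable RO detection: sample the write-protect state SAMPLE_COUNT
	 * times at 30 ms intervals and report read-only only if more than
	 * half of the samples indicate read-only.
	 */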
2550  	ro_count = 0;
2551  	for (i = 0; i < SAMPLE_COUNT; i++) {
2552  		if (sdhci_check_ro(host)) {
2553  			if (++ro_count > SAMPLE_COUNT / 2)
2554  				return 1;
2555  		}
2556  		msleep(30);
2557  	}
2558  	return 0;
2559  }
2560  
2561  static void sdhci_hw_reset(struct mmc_host *mmc)
2562  {
2563  	struct sdhci_host *host = mmc_priv(mmc);
2564  
2565  	if (host->ops && host->ops->hw_reset)
2566  		host->ops->hw_reset(host);
2567  }
2568  
2569  static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2570  {
2571  	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2572  		if (enable)
2573  			host->ier |= SDHCI_INT_CARD_INT;
2574  		else
2575  			host->ier &= ~SDHCI_INT_CARD_INT;
2576  
2577  		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2578  		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2579  	}
2580  }
2581  
2582  void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2583  {
2584  	struct sdhci_host *host = mmc_priv(mmc);
2585  	unsigned long flags;
2586  
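	/*
	 * Hold a runtime PM reference while the SDIO card interrupt is
	 * enabled, so the host is not runtime suspended while an IRQ
	 * could still arrive; the reference is dropped on disable.
	 */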
2587  	if (enable)
2588  		pm_runtime_get_noresume(mmc_dev(mmc));
2589  
2590  	spin_lock_irqsave(&host->lock, flags);
2591  	sdhci_enable_sdio_irq_nolock(host, enable);
2592  	spin_unlock_irqrestore(&host->lock, flags);
2593  
2594  	if (!enable)
2595  		pm_runtime_put_noidle(mmc_dev(mmc));
2596  }
2597  EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2598  
2599  static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2600  {
2601  	struct sdhci_host *host = mmc_priv(mmc);
2602  	unsigned long flags;
2603  
2604  	spin_lock_irqsave(&host->lock, flags);
2605  	sdhci_enable_sdio_irq_nolock(host, true);
2606  	spin_unlock_irqrestore(&host->lock, flags);
2607  }
2608  
2609  int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2610  				      struct mmc_ios *ios)
2611  {
2612  	struct sdhci_host *host = mmc_priv(mmc);
2613  	u16 ctrl;
2614  	int ret;
2615  
2616  	/*
2617  	 * Signal Voltage Switching is only applicable for Host Controllers
2618  	 * v3.00 and above.
2619  	 */
2620  	if (host->version < SDHCI_SPEC_300)
2621  		return 0;
2622  
2623  	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2624  
2625  	switch (ios->signal_voltage) {
2626  	case MMC_SIGNAL_VOLTAGE_330:
2627  		if (!(host->flags & SDHCI_SIGNALING_330))
2628  			return -EINVAL;
2629  		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2630  		ctrl &= ~SDHCI_CTRL_VDD_180;
2631  		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2632  
2633  		if (!IS_ERR(mmc->supply.vqmmc)) {
2634  			ret = mmc_regulator_set_vqmmc(mmc, ios);
2635  			if (ret < 0) {
2636  				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2637  					mmc_hostname(mmc));
2638  				return -EIO;
2639  			}
2640  		}
2641  		/* Wait for 5ms */
2642  		usleep_range(5000, 5500);
2643  
2644  		/* 3.3V regulator output should be stable within 5 ms */
2645  		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2646  		if (!(ctrl & SDHCI_CTRL_VDD_180))
2647  			return 0;
2648  
2649  		pr_warn("%s: 3.3V regulator output did not become stable\n",
2650  			mmc_hostname(mmc));
2651  
2652  		return -EAGAIN;
2653  	case MMC_SIGNAL_VOLTAGE_180:
2654  		if (!(host->flags & SDHCI_SIGNALING_180))
2655  			return -EINVAL;
2656  		if (!IS_ERR(mmc->supply.vqmmc)) {
2657  			ret = mmc_regulator_set_vqmmc(mmc, ios);
2658  			if (ret < 0) {
2659  				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2660  					mmc_hostname(mmc));
2661  				return -EIO;
2662  			}
2663  		}
2664  
2665  		/*
2666  		 * Enable 1.8V Signal Enable in the Host Control2
2667  		 * register
2668  		 */
2669  		ctrl |= SDHCI_CTRL_VDD_180;
2670  		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2671  
2672  		/* Some controllers need to do more when switching */
2673  		if (host->ops->voltage_switch)
2674  			host->ops->voltage_switch(host);
2675  
2676  		/* 1.8V regulator output should be stable within 5 ms */
2677  		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2678  		if (ctrl & SDHCI_CTRL_VDD_180)
2679  			return 0;
2680  
2681  		pr_warn("%s: 1.8V regulator output did not become stable\n",
2682  			mmc_hostname(mmc));
2683  
2684  		return -EAGAIN;
2685  	case MMC_SIGNAL_VOLTAGE_120:
2686  		if (!(host->flags & SDHCI_SIGNALING_120))
2687  			return -EINVAL;
2688  		if (!IS_ERR(mmc->supply.vqmmc)) {
2689  			ret = mmc_regulator_set_vqmmc(mmc, ios);
2690  			if (ret < 0) {
2691  				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2692  					mmc_hostname(mmc));
2693  				return -EIO;
2694  			}
2695  		}
2696  		return 0;
2697  	default:
2698  		/* No signal voltage switch required */
2699  		return 0;
2700  	}
2701  }
2702  EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2703  
2704  static int sdhci_card_busy(struct mmc_host *mmc)
2705  {
2706  	struct sdhci_host *host = mmc_priv(mmc);
2707  	u32 present_state;
2708  
2709  	/* Check whether DAT[0] is 0 */
2710  	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2711  
2712  	return !(present_state & SDHCI_DATA_0_LVL_MASK);
2713  }
2714  
2715  static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2716  {
2717  	struct sdhci_host *host = mmc_priv(mmc);
2718  	unsigned long flags;
2719  
2720  	spin_lock_irqsave(&host->lock, flags);
2721  	host->flags |= SDHCI_HS400_TUNING;
2722  	spin_unlock_irqrestore(&host->lock, flags);
2723  
2724  	return 0;
2725  }
2726  
2727  void sdhci_start_tuning(struct sdhci_host *host)
2728  {
2729  	u16 ctrl;
2730  
2731  	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2732  	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2733  	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2734  		ctrl |= SDHCI_CTRL_TUNED_CLK;
2735  	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2736  
2737  	/*
2738  	 * As per the Host Controller spec v3.00, tuning command
2739  	 * generates Buffer Read Ready interrupt, so enable that.
2740  	 *
2741  	 * Note: The spec clearly says that when tuning sequence
2742  	 * is being performed, the controller does not generate
2743  	 * interrupts other than Buffer Read Ready interrupt. But
2744  	 * to make sure we don't hit a controller bug, we _only_
2745  	 * enable Buffer Read Ready interrupt here.
2746  	 */
2747  	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2748  	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2749  }
2750  EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2751  
2752  void sdhci_end_tuning(struct sdhci_host *host)
2753  {
2754  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2755  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2756  }
2757  EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2758  
2759  void sdhci_reset_tuning(struct sdhci_host *host)
2760  {
2761  	u16 ctrl;
2762  
2763  	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2764  	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2765  	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2766  	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2767  }
2768  EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2769  
2770  void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2771  {
2772  	sdhci_reset_tuning(host);
2773  
2774  	sdhci_reset_for(host, TUNING_ABORT);
2775  
2776  	sdhci_end_tuning(host);
2777  
2778  	mmc_send_abort_tuning(host->mmc, opcode);
2779  }
2780  EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2781  
2782  /*
2783   * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2784   * tuning command does not have a data payload (or rather the hardware does it
2785   * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2786   * interrupt setup is different to other commands and there is no timeout
2787   * interrupt so special handling is needed.
2788   */
2789  void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2790  {
2791  	struct mmc_host *mmc = host->mmc;
2792  	struct mmc_command cmd = {};
2793  	struct mmc_request mrq = {};
2794  	unsigned long flags;
2795  	u32 b = host->sdma_boundary;
2796  
2797  	spin_lock_irqsave(&host->lock, flags);
2798  
2799  	cmd.opcode = opcode;
2800  	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2801  	cmd.mrq = &mrq;
2802  
2803  	mrq.cmd = &cmd;
2804  	/*
2805  	 * In response to CMD19, the card sends 64 bytes of tuning
2806  	 * block to the Host Controller. So we set the block size
2807  	 * to 64 here.
2808  	 */
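	/* For the HS200 tuning command (CMD21) on an 8-bit bus the tuning
	 * block is 128 bytes, so use a 128-byte block size in that case. */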
2809  	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2810  	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2811  		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2812  	else
2813  		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2814  
2815  	/*
2816  	 * The tuning block is sent by the card to the host controller.
2817  	 * So we set the TRNS_READ bit in the Transfer Mode register.
2818  	 * This also takes care of setting DMA Enable and Multi Block
2819  	 * Select in the same register to 0.
2820  	 */
2821  	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2822  
2823  	if (!sdhci_send_command_retry(host, &cmd, flags)) {
2824  		spin_unlock_irqrestore(&host->lock, flags);
2825  		host->tuning_done = 0;
2826  		return;
2827  	}
2828  
2829  	host->cmd = NULL;
2830  
2831  	sdhci_del_timer(host, &mrq);
2832  
2833  	host->tuning_done = 0;
2834  
2835  	spin_unlock_irqrestore(&host->lock, flags);
2836  
2837  	/* Wait for Buffer Read Ready interrupt */
2838  	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2839  			   msecs_to_jiffies(50));
2840  
2841  }
2842  EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2843  
2844  static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2845  {
2846  	int i;
2847  
2848  	/*
2849  	 * Issue the opcode repeatedly until Execute Tuning is cleared or the
2850  	 * number of loops reaches the tuning loop count.
2851  	 */
2852  	for (i = 0; i < host->tuning_loop_count; i++) {
2853  		u16 ctrl;
2854  
2855  		sdhci_send_tuning(host, opcode);
2856  
2857  		if (!host->tuning_done) {
2858  			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2859  				 mmc_hostname(host->mmc));
2860  			sdhci_abort_tuning(host, opcode);
2861  			return -ETIMEDOUT;
2862  		}
2863  
2864  		/* Spec does not require a delay between tuning cycles */
2865  		if (host->tuning_delay > 0)
2866  			mdelay(host->tuning_delay);
2867  
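		/*
		 * The controller clears Execute Tuning when the tuning
		 * sequence finishes; if Tuned Clock is set at that point the
		 * tuning succeeded, otherwise it failed.
		 */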
2868  		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2869  		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2870  			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2871  				return 0; /* Success! */
2872  			break;
2873  		}
2874  
2875  	}
2876  
2877  	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2878  		mmc_hostname(host->mmc));
2879  	sdhci_reset_tuning(host);
2880  	return -EAGAIN;
2881  }
2882  
2883  int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2884  {
2885  	struct sdhci_host *host = mmc_priv(mmc);
2886  	int err = 0;
2887  	unsigned int tuning_count = 0;
2888  	bool hs400_tuning;
2889  
2890  	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2891  
2892  	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2893  		tuning_count = host->tuning_count;
2894  
2895  	/*
2896  	 * The Host Controller needs tuning in case of SDR104 and DDR50
2897  	 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2898  	 * the Capabilities register.
2899  	 * If the Host Controller supports the HS200 mode then the
2900  	 * tuning function has to be executed.
2901  	 */
2902  	switch (host->timing) {
2903  	/* HS400 tuning is done in HS200 mode */
2904  	case MMC_TIMING_MMC_HS400:
2905  		err = -EINVAL;
2906  		goto out;
2907  
2908  	case MMC_TIMING_MMC_HS200:
2909  		/*
2910  		 * Periodic re-tuning for HS400 is not expected to be needed, so
2911  		 * disable it here.
2912  		 */
2913  		if (hs400_tuning)
2914  			tuning_count = 0;
2915  		break;
2916  
2917  	case MMC_TIMING_UHS_SDR104:
2918  	case MMC_TIMING_UHS_DDR50:
2919  		break;
2920  
2921  	case MMC_TIMING_UHS_SDR50:
2922  		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2923  			break;
2924  		fallthrough;
2925  
2926  	default:
2927  		goto out;
2928  	}
2929  
2930  	if (host->ops->platform_execute_tuning) {
2931  		err = host->ops->platform_execute_tuning(host, opcode);
2932  		goto out;
2933  	}
2934  
2935  	mmc->retune_period = tuning_count;
2936  
2937  	if (host->tuning_delay < 0)
2938  		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2939  
2940  	sdhci_start_tuning(host);
2941  
2942  	host->tuning_err = __sdhci_execute_tuning(host, opcode);
2943  
2944  	sdhci_end_tuning(host);
2945  out:
2946  	host->flags &= ~SDHCI_HS400_TUNING;
2947  
2948  	return err;
2949  }
2950  EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2951  
2952  static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2953  {
2954  	/* Host Controller v3.00 defines preset value registers */
2955  	if (host->version < SDHCI_SPEC_300)
2956  		return;
2957  
2958  	/*
2959  	 * Only enable or disable Preset Value if it is not already in the
2960  	 * requested state; otherwise, bail out.
2961  	 */
2962  	if (host->preset_enabled != enable) {
2963  		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2964  
2965  		if (enable)
2966  			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2967  		else
2968  			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2969  
2970  		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2971  
2972  		if (enable)
2973  			host->flags |= SDHCI_PV_ENABLED;
2974  		else
2975  			host->flags &= ~SDHCI_PV_ENABLED;
2976  
2977  		host->preset_enabled = enable;
2978  	}
2979  }
2980  
2981  static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2982  				int err)
2983  {
2984  	struct mmc_data *data = mrq->data;
2985  
2986  	if (data->host_cookie != COOKIE_UNMAPPED)
2987  		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2988  			     mmc_get_dma_dir(data));
2989  
2990  	data->host_cookie = COOKIE_UNMAPPED;
2991  }
2992  
2993  static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2994  {
2995  	struct sdhci_host *host = mmc_priv(mmc);
2996  
2997  	mrq->data->host_cookie = COOKIE_UNMAPPED;
2998  
2999  	/*
3000  	 * No pre-mapping in the pre hook if we're using the bounce buffer,
3001  	 * for that we would need two bounce buffers since one buffer is
3002  	 * in flight when this is getting called.
3003  	 */
3004  	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
3005  		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
3006  }
3007  
3008  static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
3009  {
3010  	if (host->data_cmd) {
3011  		host->data_cmd->error = err;
3012  		sdhci_finish_mrq(host, host->data_cmd->mrq);
3013  	}
3014  
3015  	if (host->cmd) {
3016  		host->cmd->error = err;
3017  		sdhci_finish_mrq(host, host->cmd->mrq);
3018  	}
3019  }
3020  
3021  static void sdhci_card_event(struct mmc_host *mmc)
3022  {
3023  	struct sdhci_host *host = mmc_priv(mmc);
3024  	unsigned long flags;
3025  	int present;
3026  
3027  	/* First check if client has provided their own card event */
3028  	if (host->ops->card_event)
3029  		host->ops->card_event(host);
3030  
3031  	present = mmc->ops->get_cd(mmc);
3032  
3033  	spin_lock_irqsave(&host->lock, flags);
3034  
3035  	/* Check sdhci_has_requests() first in case we are runtime suspended */
3036  	if (sdhci_has_requests(host) && !present) {
3037  		pr_err("%s: Card removed during transfer!\n",
3038  			mmc_hostname(mmc));
3039  		pr_err("%s: Resetting controller.\n",
3040  			mmc_hostname(mmc));
3041  
3042  		sdhci_reset_for(host, CARD_REMOVED);
3043  
3044  		sdhci_error_out_mrqs(host, -ENOMEDIUM);
3045  	}
3046  
3047  	spin_unlock_irqrestore(&host->lock, flags);
3048  }
3049  
3050  static const struct mmc_host_ops sdhci_ops = {
3051  	.request	= sdhci_request,
3052  	.post_req	= sdhci_post_req,
3053  	.pre_req	= sdhci_pre_req,
3054  	.set_ios	= sdhci_set_ios,
3055  	.get_cd		= sdhci_get_cd,
3056  	.get_ro		= sdhci_get_ro,
3057  	.card_hw_reset	= sdhci_hw_reset,
3058  	.enable_sdio_irq = sdhci_enable_sdio_irq,
3059  	.ack_sdio_irq    = sdhci_ack_sdio_irq,
3060  	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
3061  	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
3062  	.execute_tuning			= sdhci_execute_tuning,
3063  	.card_event			= sdhci_card_event,
3064  	.card_busy	= sdhci_card_busy,
3065  };
3066  
3067  /*****************************************************************************\
3068   *                                                                           *
3069   * Request done                                                              *
3070   *                                                                           *
3071  \*****************************************************************************/
3072  
3073  static bool sdhci_request_done(struct sdhci_host *host)
3074  {
3075  	unsigned long flags;
3076  	struct mmc_request *mrq;
3077  	int i;
3078  
3079  	spin_lock_irqsave(&host->lock, flags);
3080  
3081  	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3082  		mrq = host->mrqs_done[i];
3083  		if (mrq)
3084  			break;
3085  	}
3086  
3087  	if (!mrq) {
3088  		spin_unlock_irqrestore(&host->lock, flags);
3089  		return true;
3090  	}
3091  
3092  	/*
3093  	 * The controller needs a reset of internal state machines
3094  	 * upon error conditions.
3095  	 */
3096  	if (sdhci_needs_reset(host, mrq)) {
3097  		/*
3098  		 * Do not finish until command and data lines are available for
3099  		 * reset. Note there can only be one other mrq, so it cannot
3100  		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3101  		 * would both be null.
3102  		 */
3103  		if (host->cmd || host->data_cmd) {
3104  			spin_unlock_irqrestore(&host->lock, flags);
3105  			return true;
3106  		}
3107  
3108  		/* Some controllers need this kick or reset won't work here */
3109  		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3110  			/* This is to force an update */
3111  			host->ops->set_clock(host, host->clock);
3112  
3113  		sdhci_reset_for(host, REQUEST_ERROR);
3114  
3115  		host->pending_reset = false;
3116  	}
3117  
3118  	/*
3119  	 * Always unmap the data buffers if they were mapped by
3120  	 * sdhci_prepare_data() whenever we finish with a request.
3121  	 * This avoids leaking DMA mappings on error.
3122  	 */
3123  	if (host->flags & SDHCI_REQ_USE_DMA) {
3124  		struct mmc_data *data = mrq->data;
3125  
3126  		if (host->use_external_dma && data &&
3127  		    (mrq->cmd->error || data->error)) {
3128  			struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3129  
3130  			host->mrqs_done[i] = NULL;
3131  			spin_unlock_irqrestore(&host->lock, flags);
3132  			dmaengine_terminate_sync(chan);
3133  			spin_lock_irqsave(&host->lock, flags);
3134  			sdhci_set_mrq_done(host, mrq);
3135  		}
3136  
3137  		if (data && data->host_cookie == COOKIE_MAPPED) {
3138  			if (host->bounce_buffer) {
3139  				/*
3140  				 * On reads, copy the bounced data into the
3141  				 * sglist
3142  				 */
3143  				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3144  					unsigned int length = data->bytes_xfered;
3145  
3146  					if (length > host->bounce_buffer_size) {
3147  						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3148  						       mmc_hostname(host->mmc),
3149  						       host->bounce_buffer_size,
3150  						       data->bytes_xfered);
3151  						/* Cap it down and continue */
3152  						length = host->bounce_buffer_size;
3153  					}
3154  					dma_sync_single_for_cpu(
3155  						mmc_dev(host->mmc),
3156  						host->bounce_addr,
3157  						host->bounce_buffer_size,
3158  						DMA_FROM_DEVICE);
3159  					sg_copy_from_buffer(data->sg,
3160  						data->sg_len,
3161  						host->bounce_buffer,
3162  						length);
3163  				} else {
3164  					/* No copying, just switch ownership */
3165  					dma_sync_single_for_cpu(
3166  						mmc_dev(host->mmc),
3167  						host->bounce_addr,
3168  						host->bounce_buffer_size,
3169  						mmc_get_dma_dir(data));
3170  				}
3171  			} else {
3172  				/* Unmap the raw data */
3173  				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3174  					     data->sg_len,
3175  					     mmc_get_dma_dir(data));
3176  			}
3177  			data->host_cookie = COOKIE_UNMAPPED;
3178  		}
3179  	}
3180  
3181  	host->mrqs_done[i] = NULL;
3182  
3183  	spin_unlock_irqrestore(&host->lock, flags);
3184  
3185  	if (host->ops->request_done)
3186  		host->ops->request_done(host, mrq);
3187  	else
3188  		mmc_request_done(host->mmc, mrq);
3189  
3190  	return false;
3191  }
3192  
3193  static void sdhci_complete_work(struct work_struct *work)
3194  {
3195  	struct sdhci_host *host = container_of(work, struct sdhci_host,
3196  					       complete_work);
3197  
3198  	while (!sdhci_request_done(host))
3199  		;
3200  }
3201  
3202  static void sdhci_timeout_timer(struct timer_list *t)
3203  {
3204  	struct sdhci_host *host;
3205  	unsigned long flags;
3206  
3207  	host = from_timer(host, t, timer);
3208  
3209  	spin_lock_irqsave(&host->lock, flags);
3210  
3211  	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3212  		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3213  		       mmc_hostname(host->mmc));
3214  		sdhci_err_stats_inc(host, REQ_TIMEOUT);
3215  		sdhci_dumpregs(host);
3216  
3217  		host->cmd->error = -ETIMEDOUT;
3218  		sdhci_finish_mrq(host, host->cmd->mrq);
3219  	}
3220  
3221  	spin_unlock_irqrestore(&host->lock, flags);
3222  }
3223  
3224  static void sdhci_timeout_data_timer(struct timer_list *t)
3225  {
3226  	struct sdhci_host *host;
3227  	unsigned long flags;
3228  
3229  	host = from_timer(host, t, data_timer);
3230  
3231  	spin_lock_irqsave(&host->lock, flags);
3232  
3233  	if (host->data || host->data_cmd ||
3234  	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3235  		pr_err("%s: Timeout waiting for hardware interrupt.\n",
3236  		       mmc_hostname(host->mmc));
3237  		sdhci_err_stats_inc(host, REQ_TIMEOUT);
3238  		sdhci_dumpregs(host);
3239  
3240  		if (host->data) {
3241  			host->data->error = -ETIMEDOUT;
3242  			__sdhci_finish_data(host, true);
3243  			queue_work(host->complete_wq, &host->complete_work);
3244  		} else if (host->data_cmd) {
3245  			host->data_cmd->error = -ETIMEDOUT;
3246  			sdhci_finish_mrq(host, host->data_cmd->mrq);
3247  		} else {
3248  			host->cmd->error = -ETIMEDOUT;
3249  			sdhci_finish_mrq(host, host->cmd->mrq);
3250  		}
3251  	}
3252  
3253  	spin_unlock_irqrestore(&host->lock, flags);
3254  }
3255  
3256  /*****************************************************************************\
3257   *                                                                           *
3258   * Interrupt handling                                                        *
3259   *                                                                           *
3260  \*****************************************************************************/
3261  
3262  static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3263  {
3264  	/* Handle auto-CMD12 error */
3265  	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3266  		struct mmc_request *mrq = host->data_cmd->mrq;
3267  		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3268  		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3269  				   SDHCI_INT_DATA_TIMEOUT :
3270  				   SDHCI_INT_DATA_CRC;
3271  
3272  		/* Treat auto-CMD12 error the same as data error */
3273  		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3274  			*intmask_p |= data_err_bit;
3275  			return;
3276  		}
3277  	}
3278  
3279  	if (!host->cmd) {
3280  		/*
3281  		 * SDHCI recovers from errors by resetting the cmd and data
3282  		 * circuits.  Until that is done, there very well might be more
3283  		 * interrupts, so ignore them in that case.
3284  		 */
3285  		if (host->pending_reset)
3286  			return;
3287  		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3288  		       mmc_hostname(host->mmc), (unsigned)intmask);
3289  		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3290  		sdhci_dumpregs(host);
3291  		return;
3292  	}
3293  
3294  	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3295  		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3296  		if (intmask & SDHCI_INT_TIMEOUT) {
3297  			host->cmd->error = -ETIMEDOUT;
3298  			sdhci_err_stats_inc(host, CMD_TIMEOUT);
3299  		} else {
3300  			host->cmd->error = -EILSEQ;
3301  			if (!mmc_op_tuning(host->cmd->opcode))
3302  				sdhci_err_stats_inc(host, CMD_CRC);
3303  		}
3304  		/* Treat data command CRC error the same as data CRC error */
3305  		if (host->cmd->data &&
3306  		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3307  		     SDHCI_INT_CRC) {
3308  			host->cmd = NULL;
3309  			*intmask_p |= SDHCI_INT_DATA_CRC;
3310  			return;
3311  		}
3312  
3313  		__sdhci_finish_mrq(host, host->cmd->mrq);
3314  		return;
3315  	}
3316  
3317  	/* Handle auto-CMD23 error */
3318  	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3319  		struct mmc_request *mrq = host->cmd->mrq;
3320  		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3321  		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3322  			  -ETIMEDOUT :
3323  			  -EILSEQ;
3324  
3325  		sdhci_err_stats_inc(host, AUTO_CMD);
3326  
3327  		if (sdhci_auto_cmd23(host, mrq)) {
3328  			mrq->sbc->error = err;
3329  			__sdhci_finish_mrq(host, mrq);
3330  			return;
3331  		}
3332  	}
3333  
3334  	if (intmask & SDHCI_INT_RESPONSE)
3335  		sdhci_finish_command(host);
3336  }
3337  
3338  static void sdhci_adma_show_error(struct sdhci_host *host)
3339  {
3340  	void *desc = host->adma_table;
3341  	dma_addr_t dma = host->adma_addr;
3342  
3343  	sdhci_dumpregs(host);
3344  
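	/* Walk the ADMA2 descriptor table, dumping each descriptor until the
	 * entry carrying the END attribute is reached. */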
3345  	while (true) {
3346  		struct sdhci_adma2_64_desc *dma_desc = desc;
3347  
3348  		if (host->flags & SDHCI_USE_64_BIT_DMA)
3349  			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3350  			    (unsigned long long)dma,
3351  			    le32_to_cpu(dma_desc->addr_hi),
3352  			    le32_to_cpu(dma_desc->addr_lo),
3353  			    le16_to_cpu(dma_desc->len),
3354  			    le16_to_cpu(dma_desc->cmd));
3355  		else
3356  			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3357  			    (unsigned long long)dma,
3358  			    le32_to_cpu(dma_desc->addr_lo),
3359  			    le16_to_cpu(dma_desc->len),
3360  			    le16_to_cpu(dma_desc->cmd));
3361  
3362  		desc += host->desc_sz;
3363  		dma += host->desc_sz;
3364  
3365  		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3366  			break;
3367  	}
3368  }
3369  
3370  static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3371  {
3372  	/*
3373  	 * CMD19 generates _only_ a Buffer Read Ready interrupt when
3374  	 * sdhci_send_tuning() is used.
3375  	 * We must handle the case of PIO mode together with mmc_send_tuning():
3376  	 * otherwise sdhci_transfer_pio() is never called, SDHCI_INT_DATA_AVAIL
3377  	 * stays asserted and we end up stuck in an interrupt storm.
3378  	 */
3379  	if (intmask & SDHCI_INT_DATA_AVAIL && !host->data) {
3380  		if (mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)))) {
3381  			host->tuning_done = 1;
3382  			wake_up(&host->buf_ready_int);
3383  			return;
3384  		}
3385  	}
3386  
3387  	if (!host->data) {
3388  		struct mmc_command *data_cmd = host->data_cmd;
3389  
3390  		/*
3391  		 * The "data complete" interrupt is also used to
3392  		 * indicate that a busy state has ended. See comment
3393  		 * above in sdhci_cmd_irq().
3394  		 */
3395  		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3396  			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3397  				host->data_cmd = NULL;
3398  				data_cmd->error = -ETIMEDOUT;
3399  				sdhci_err_stats_inc(host, CMD_TIMEOUT);
3400  				__sdhci_finish_mrq(host, data_cmd->mrq);
3401  				return;
3402  			}
3403  			if (intmask & SDHCI_INT_DATA_END) {
3404  				host->data_cmd = NULL;
3405  				/*
3406  				 * Some cards handle busy-end interrupt
3407  				 * before the command completed, so make
3408  				 * sure we do things in the proper order.
3409  				 */
3410  				if (host->cmd == data_cmd)
3411  					return;
3412  
3413  				__sdhci_finish_mrq(host, data_cmd->mrq);
3414  				return;
3415  			}
3416  		}
3417  
3418  		/*
3419  		 * SDHCI recovers from errors by resetting the cmd and data
3420  		 * circuits. Until that is done, there very well might be more
3421  		 * interrupts, so ignore them in that case.
3422  		 */
3423  		if (host->pending_reset)
3424  			return;
3425  
3426  		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3427  		       mmc_hostname(host->mmc), (unsigned)intmask);
3428  		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3429  		sdhci_dumpregs(host);
3430  
3431  		return;
3432  	}
3433  
3434  	if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3435  		host->data->error = -ETIMEDOUT;
3436  		sdhci_err_stats_inc(host, DAT_TIMEOUT);
3437  	} else if (intmask & SDHCI_INT_DATA_END_BIT) {
3438  		host->data->error = -EILSEQ;
3439  		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3440  			sdhci_err_stats_inc(host, DAT_CRC);
3441  	} else if ((intmask & SDHCI_INT_DATA_CRC) &&
3442  		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3443  			!= MMC_BUS_TEST_R) {
3444  		host->data->error = -EILSEQ;
3445  		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3446  			sdhci_err_stats_inc(host, DAT_CRC);
3447  	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
3448  		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3449  		       intmask);
3450  		sdhci_adma_show_error(host);
3451  		sdhci_err_stats_inc(host, ADMA);
3452  		host->data->error = -EIO;
3453  		if (host->ops->adma_workaround)
3454  			host->ops->adma_workaround(host, intmask);
3455  	}
3456  
3457  	if (host->data->error)
3458  		sdhci_finish_data(host);
3459  	else {
3460  		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3461  			sdhci_transfer_pio(host);
3462  
3463  		/*
3464  		 * We currently don't do anything fancy with DMA
3465  		 * boundaries, but as we can't disable the feature
3466  		 * we need to at least restart the transfer.
3467  		 *
3468  		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3469  		 * should return a valid address to continue from, but as
3470  		 * some controllers are faulty, don't trust them.
3471  		 */
3472  		if (intmask & SDHCI_INT_DMA_END) {
3473  			dma_addr_t dmastart, dmanow;
3474  
3475  			dmastart = sdhci_sdma_address(host);
3476  			dmanow = dmastart + host->data->bytes_xfered;
3477  			/*
3478  			 * Force update to the next DMA block boundary.
3479  			 */
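			/*
			 * Worked example (illustrative only, assuming the default
			 * 512 KiB boundary): if the SDMA address has reached
			 * 0x12345678, masking with ~(0x80000 - 1) gives 0x12300000
			 * and adding 0x80000 restarts the transfer at the next
			 * boundary, 0x12380000.
			 */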
3480  			dmanow = (dmanow &
3481  				~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3482  				SDHCI_DEFAULT_BOUNDARY_SIZE;
3483  			host->data->bytes_xfered = dmanow - dmastart;
3484  			DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3485  			    &dmastart, host->data->bytes_xfered, &dmanow);
3486  			sdhci_set_sdma_addr(host, dmanow);
3487  		}
3488  
3489  		if (intmask & SDHCI_INT_DATA_END) {
3490  			if (host->cmd == host->data_cmd) {
3491  				/*
3492  				 * Data managed to finish before the
3493  				 * command completed. Make sure we do
3494  				 * things in the proper order.
3495  				 */
3496  				host->data_early = 1;
3497  			} else {
3498  				sdhci_finish_data(host);
3499  			}
3500  		}
3501  	}
3502  }
3503  
3504  static inline bool sdhci_defer_done(struct sdhci_host *host,
3505  				    struct mmc_request *mrq)
3506  {
3507  	struct mmc_data *data = mrq->data;
3508  
3509  	return host->pending_reset || host->always_defer_done ||
3510  	       ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3511  		data->host_cookie == COOKIE_MAPPED);
3512  }
3513  
3514  static irqreturn_t sdhci_irq(int irq, void *dev_id)
3515  {
3516  	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3517  	irqreturn_t result = IRQ_NONE;
3518  	struct sdhci_host *host = dev_id;
3519  	u32 intmask, mask, unexpected = 0;
3520  	int max_loops = 16;
3521  	int i;
3522  
3523  	spin_lock(&host->lock);
3524  
3525  	if (host->runtime_suspended) {
3526  		spin_unlock(&host->lock);
3527  		return IRQ_NONE;
3528  	}
3529  
3530  	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3531  	if (!intmask || intmask == 0xffffffff) {
3532  		result = IRQ_NONE;
3533  		goto out;
3534  	}
3535  
3536  	do {
3537  		DBG("IRQ status 0x%08x\n", intmask);
3538  
3539  		if (host->ops->irq) {
3540  			intmask = host->ops->irq(host, intmask);
3541  			if (!intmask)
3542  				goto cont;
3543  		}
3544  
3545  		/* Clear selected interrupts. */
3546  		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3547  				  SDHCI_INT_BUS_POWER);
3548  		sdhci_writel(host, mask, SDHCI_INT_STATUS);
3549  
3550  		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3551  			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3552  				      SDHCI_CARD_PRESENT;
3553  
3554  			/*
3555  			 * There is an observation on i.MX eSDHC: if a card is
3556  			 * inserted, the INSERT bit is set again immediately
3557  			 * after being cleared.  We have to mask the irq to
3558  			 * prevent an interrupt storm which would freeze the
3559  			 * system.  The REMOVE bit runs into the same
3560  			 * situation.
3561  			 *
3562  			 * More testing is needed here to ensure it works for
3563  			 * other platforms though.
3564  			 */
3565  			host->ier &= ~(SDHCI_INT_CARD_INSERT |
3566  				       SDHCI_INT_CARD_REMOVE);
3567  			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3568  					       SDHCI_INT_CARD_INSERT;
3569  			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3570  			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3571  
3572  			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3573  				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3574  
3575  			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3576  						       SDHCI_INT_CARD_REMOVE);
3577  			result = IRQ_WAKE_THREAD;
3578  		}
3579  
3580  		if (intmask & SDHCI_INT_CMD_MASK)
3581  			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3582  
3583  		if (intmask & SDHCI_INT_DATA_MASK)
3584  			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3585  
3586  		if (intmask & SDHCI_INT_BUS_POWER)
3587  			pr_err("%s: Card is consuming too much power!\n",
3588  				mmc_hostname(host->mmc));
3589  
3590  		if (intmask & SDHCI_INT_RETUNE)
3591  			mmc_retune_needed(host->mmc);
3592  
3593  		if ((intmask & SDHCI_INT_CARD_INT) &&
3594  		    (host->ier & SDHCI_INT_CARD_INT)) {
3595  			sdhci_enable_sdio_irq_nolock(host, false);
3596  			sdio_signal_irq(host->mmc);
3597  		}
3598  
3599  		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3600  			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3601  			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3602  			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3603  
3604  		if (intmask) {
3605  			unexpected |= intmask;
3606  			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3607  		}
3608  cont:
3609  		if (result == IRQ_NONE)
3610  			result = IRQ_HANDLED;
3611  
3612  		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3613  	} while (intmask && --max_loops);
3614  
3615  	/* Determine if mrqs can be completed immediately */
3616  	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3617  		struct mmc_request *mrq = host->mrqs_done[i];
3618  
3619  		if (!mrq)
3620  			continue;
3621  
3622  		if (sdhci_defer_done(host, mrq)) {
3623  			result = IRQ_WAKE_THREAD;
3624  		} else {
3625  			mrqs_done[i] = mrq;
3626  			host->mrqs_done[i] = NULL;
3627  		}
3628  	}
3629  out:
3630  	if (host->deferred_cmd)
3631  		result = IRQ_WAKE_THREAD;
3632  
3633  	spin_unlock(&host->lock);
3634  
3635  	/* Process mrqs ready for immediate completion */
3636  	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3637  		if (!mrqs_done[i])
3638  			continue;
3639  
3640  		if (host->ops->request_done)
3641  			host->ops->request_done(host, mrqs_done[i]);
3642  		else
3643  			mmc_request_done(host->mmc, mrqs_done[i]);
3644  	}
3645  
3646  	if (unexpected) {
3647  		pr_err("%s: Unexpected interrupt 0x%08x.\n",
3648  			   mmc_hostname(host->mmc), unexpected);
3649  		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
3650  		sdhci_dumpregs(host);
3651  	}
3652  
3653  	return result;
3654  }
3655  
3656  static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3657  {
3658  	struct sdhci_host *host = dev_id;
3659  	struct mmc_command *cmd;
3660  	unsigned long flags;
3661  	u32 isr;
3662  
3663  	while (!sdhci_request_done(host))
3664  		;
3665  
3666  	spin_lock_irqsave(&host->lock, flags);
3667  
3668  	isr = host->thread_isr;
3669  	host->thread_isr = 0;
3670  
3671  	cmd = host->deferred_cmd;
3672  	if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3673  		sdhci_finish_mrq(host, cmd->mrq);
3674  
3675  	spin_unlock_irqrestore(&host->lock, flags);
3676  
3677  	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3678  		struct mmc_host *mmc = host->mmc;
3679  
3680  		mmc->ops->card_event(mmc);
3681  		mmc_detect_change(mmc, msecs_to_jiffies(200));
3682  	}
3683  
3684  	return IRQ_HANDLED;
3685  }
3686  
3687  /*****************************************************************************\
3688   *                                                                           *
3689   * Suspend/resume                                                            *
3690   *                                                                           *
3691  \*****************************************************************************/
3692  
3693  #ifdef CONFIG_PM
3694  
3695  static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3696  {
3697  	return mmc_card_is_removable(host->mmc) &&
3698  	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3699  	       !mmc_can_gpio_cd(host->mmc);
3700  }
3701  
3702  /*
3703   * To enable wakeup events, the corresponding events have to be enabled in
3704   * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3705   * Table' in the SD Host Controller Standard Specification.
3706   * It is useless to restore SDHCI_INT_ENABLE state in
3707   * sdhci_disable_irq_wakeups() since it will be set by
3708   * sdhci_enable_card_detection() or sdhci_init().
3709   */
3710  static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3711  {
3712  	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3713  		  SDHCI_WAKE_ON_INT;
3714  	u32 irq_val = 0;
3715  	u8 wake_val = 0;
3716  	u8 val;
3717  
3718  	if (sdhci_cd_irq_can_wakeup(host)) {
3719  		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3720  		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3721  	}
3722  
3723  	if (mmc_card_wake_sdio_irq(host->mmc)) {
3724  		wake_val |= SDHCI_WAKE_ON_INT;
3725  		irq_val |= SDHCI_INT_CARD_INT;
3726  	}
3727  
3728  	if (!irq_val)
3729  		return false;
3730  
3731  	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3732  	val &= ~mask;
3733  	val |= wake_val;
3734  	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3735  
3736  	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3737  
3738  	host->irq_wake_enabled = !enable_irq_wake(host->irq);
3739  
3740  	return host->irq_wake_enabled;
3741  }
3742  
3743  static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3744  {
3745  	u8 val;
3746  	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3747  			| SDHCI_WAKE_ON_INT;
3748  
3749  	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3750  	val &= ~mask;
3751  	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3752  
3753  	disable_irq_wake(host->irq);
3754  
3755  	host->irq_wake_enabled = false;
3756  }
3757  
3758  int sdhci_suspend_host(struct sdhci_host *host)
3759  {
3760  	sdhci_disable_card_detection(host);
3761  
3762  	mmc_retune_timer_stop(host->mmc);
3763  
3764  	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3765  	    !sdhci_enable_irq_wakeups(host)) {
3766  		host->ier = 0;
3767  		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3768  		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3769  		free_irq(host->irq, host);
3770  	}
3771  
3772  	return 0;
3773  }
3774  
3775  EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3776  
3777  int sdhci_resume_host(struct sdhci_host *host)
3778  {
3779  	struct mmc_host *mmc = host->mmc;
3780  	int ret = 0;
3781  
3782  	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3783  		if (host->ops->enable_dma)
3784  			host->ops->enable_dma(host);
3785  	}
3786  
3787  	if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3788  	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3789  		/* Card keeps power but host controller does not */
3790  		sdhci_init(host, 0);
3791  		host->pwr = 0;
3792  		host->clock = 0;
3793  		host->reinit_uhs = true;
3794  		mmc->ops->set_ios(mmc, &mmc->ios);
3795  	} else {
3796  		sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3797  	}
3798  
3799  	if (host->irq_wake_enabled) {
3800  		sdhci_disable_irq_wakeups(host);
3801  	} else {
3802  		ret = request_threaded_irq(host->irq, sdhci_irq,
3803  					   sdhci_thread_irq, IRQF_SHARED,
3804  					   mmc_hostname(mmc), host);
3805  		if (ret)
3806  			return ret;
3807  	}
3808  
3809  	sdhci_enable_card_detection(host);
3810  
3811  	return ret;
3812  }
3813  
3814  EXPORT_SYMBOL_GPL(sdhci_resume_host);
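
/*
 * Hedged usage sketch (not part of this file): a platform glue driver would
 * typically wire these helpers into its dev_pm_ops, for example
 *
 *	static int foo_sdhci_suspend(struct device *dev)
 *	{
 *		struct sdhci_host *host = dev_get_drvdata(dev);
 *
 *		return sdhci_suspend_host(host);
 *	}
 *
 * with a matching resume callback calling sdhci_resume_host(host).  The
 * "foo_sdhci_*" name is hypothetical and only illustrates the call pattern.
 */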
3815  
3816  int sdhci_runtime_suspend_host(struct sdhci_host *host)
3817  {
3818  	unsigned long flags;
3819  
3820  	mmc_retune_timer_stop(host->mmc);
3821  
3822  	spin_lock_irqsave(&host->lock, flags);
3823  	host->ier &= SDHCI_INT_CARD_INT;
3824  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3825  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3826  	spin_unlock_irqrestore(&host->lock, flags);
3827  
3828  	synchronize_hardirq(host->irq);
3829  
3830  	spin_lock_irqsave(&host->lock, flags);
3831  	host->runtime_suspended = true;
3832  	spin_unlock_irqrestore(&host->lock, flags);
3833  
3834  	return 0;
3835  }
3836  EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3837  
3838  int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3839  {
3840  	struct mmc_host *mmc = host->mmc;
3841  	unsigned long flags;
3842  	int host_flags = host->flags;
3843  
3844  	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3845  		if (host->ops->enable_dma)
3846  			host->ops->enable_dma(host);
3847  	}
3848  
3849  	sdhci_init(host, soft_reset);
3850  
3851  	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3852  	    mmc->ios.power_mode != MMC_POWER_OFF) {
3853  		/* Force clock and power re-program */
3854  		host->pwr = 0;
3855  		host->clock = 0;
3856  		host->reinit_uhs = true;
3857  		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3858  		mmc->ops->set_ios(mmc, &mmc->ios);
3859  
3860  		if ((host_flags & SDHCI_PV_ENABLED) &&
3861  		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3862  			spin_lock_irqsave(&host->lock, flags);
3863  			sdhci_enable_preset_value(host, true);
3864  			spin_unlock_irqrestore(&host->lock, flags);
3865  		}
3866  
3867  		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3868  		    mmc->ops->hs400_enhanced_strobe)
3869  			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3870  	}
3871  
3872  	spin_lock_irqsave(&host->lock, flags);
3873  
3874  	host->runtime_suspended = false;
3875  
3876  	/* Enable SDIO IRQ */
3877  	if (sdio_irq_claimed(mmc))
3878  		sdhci_enable_sdio_irq_nolock(host, true);
3879  
3880  	/* Enable Card Detection */
3881  	sdhci_enable_card_detection(host);
3882  
3883  	spin_unlock_irqrestore(&host->lock, flags);
3884  
3885  	return 0;
3886  }
3887  EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3888  
3889  #endif /* CONFIG_PM */
3890  
3891  /*****************************************************************************\
3892   *                                                                           *
3893   * Command Queue Engine (CQE) helpers                                        *
3894   *                                                                           *
3895  \*****************************************************************************/
3896  
3897  void sdhci_cqe_enable(struct mmc_host *mmc)
3898  {
3899  	struct sdhci_host *host = mmc_priv(mmc);
3900  	unsigned long flags;
3901  	u8 ctrl;
3902  
3903  	spin_lock_irqsave(&host->lock, flags);
3904  
3905  	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3906  	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3907  	/*
3908  	 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3909  	 * ADMA3 uses integrated descriptors, which suits command queuing
3910  	 * better as both command and transfer descriptors are fetched together.
3911  	 */
3912  	if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3913  		ctrl |= SDHCI_CTRL_ADMA3;
3914  	else if (host->flags & SDHCI_USE_64_BIT_DMA)
3915  		ctrl |= SDHCI_CTRL_ADMA64;
3916  	else
3917  		ctrl |= SDHCI_CTRL_ADMA32;
3918  	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3919  
3920  	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3921  		     SDHCI_BLOCK_SIZE);
3922  
3923  	/* Set maximum timeout */
3924  	sdhci_set_timeout(host, NULL);
3925  
3926  	host->ier = host->cqe_ier;
3927  
3928  	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3929  	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3930  
3931  	host->cqe_on = true;
3932  
3933  	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3934  		 mmc_hostname(mmc), host->ier,
3935  		 sdhci_readl(host, SDHCI_INT_STATUS));
3936  
3937  	spin_unlock_irqrestore(&host->lock, flags);
3938  }
3939  EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3940  
3941  void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3942  {
3943  	struct sdhci_host *host = mmc_priv(mmc);
3944  	unsigned long flags;
3945  
3946  	spin_lock_irqsave(&host->lock, flags);
3947  
3948  	sdhci_set_default_irqs(host);
3949  
3950  	host->cqe_on = false;
3951  
3952  	if (recovery)
3953  		sdhci_reset_for(host, CQE_RECOVERY);
3954  
3955  	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3956  		 mmc_hostname(mmc), host->ier,
3957  		 sdhci_readl(host, SDHCI_INT_STATUS));
3958  
3959  	spin_unlock_irqrestore(&host->lock, flags);
3960  }
3961  EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3962  
3963  bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3964  		   int *data_error)
3965  {
3966  	u32 mask;
3967  
3968  	if (!host->cqe_on)
3969  		return false;
3970  
3971  	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC)) {
3972  		*cmd_error = -EILSEQ;
3973  		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3974  			sdhci_err_stats_inc(host, CMD_CRC);
3975  	} else if (intmask & SDHCI_INT_TIMEOUT) {
3976  		*cmd_error = -ETIMEDOUT;
3977  		sdhci_err_stats_inc(host, CMD_TIMEOUT);
3978  	} else
3979  		*cmd_error = 0;
3980  
3981  	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC)) {
3982  		*data_error = -EILSEQ;
3983  		if (!mmc_op_tuning(SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))))
3984  			sdhci_err_stats_inc(host, DAT_CRC);
3985  	} else if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3986  		*data_error = -ETIMEDOUT;
3987  		sdhci_err_stats_inc(host, DAT_TIMEOUT);
3988  	} else if (intmask & SDHCI_INT_ADMA_ERROR) {
3989  		*data_error = -EIO;
3990  		sdhci_err_stats_inc(host, ADMA);
3991  	} else
3992  		*data_error = 0;
3993  
3994  	/* Clear selected interrupts. */
3995  	mask = intmask & host->cqe_ier;
3996  	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3997  
3998  	if (intmask & SDHCI_INT_BUS_POWER)
3999  		pr_err("%s: Card is consuming too much power!\n",
4000  		       mmc_hostname(host->mmc));
4001  
4002  	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
4003  	if (intmask) {
4004  		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
4005  		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
4006  		       mmc_hostname(host->mmc), intmask);
4007  		sdhci_err_stats_inc(host, UNEXPECTED_IRQ);
4008  		sdhci_dumpregs(host);
4009  	}
4010  
4011  	return true;
4012  }
4013  EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
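
/*
 * Hedged usage sketch (assumption, not taken from this file): a CQE-capable
 * glue driver typically calls sdhci_cqe_irq() from its ->irq() hook and, when
 * it returns true, hands the classified errors to the CQHCI library, roughly:
 *
 *	if (sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 * cqhci_irq() belongs to the separate CQHCI code and is shown here only to
 * illustrate the expected call pattern.
 */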
4014  
4015  /*****************************************************************************\
4016   *                                                                           *
4017   * Device allocation/registration                                            *
4018   *                                                                           *
4019  \*****************************************************************************/
4020  
4021  struct sdhci_host *sdhci_alloc_host(struct device *dev,
4022  	size_t priv_size)
4023  {
4024  	struct mmc_host *mmc;
4025  	struct sdhci_host *host;
4026  
4027  	WARN_ON(dev == NULL);
4028  
4029  	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
4030  	if (!mmc)
4031  		return ERR_PTR(-ENOMEM);
4032  
4033  	host = mmc_priv(mmc);
4034  	host->mmc = mmc;
4035  	host->mmc_host_ops = sdhci_ops;
4036  	mmc->ops = &host->mmc_host_ops;
4037  
4038  	host->flags = SDHCI_SIGNALING_330;
4039  
4040  	host->cqe_ier     = SDHCI_CQE_INT_MASK;
4041  	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
4042  
4043  	host->tuning_delay = -1;
4044  	host->tuning_loop_count = MAX_TUNING_LOOP;
4045  
4046  	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
4047  
4048  	/*
4049  	 * The DMA table descriptor count is calculated as the maximum
4050  	 * number of segments times 2, to allow for an alignment
4051  	 * descriptor for each segment, plus 1 for a nop end descriptor.
4052  	 */
4053  	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
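	/*
	 * Illustration (assuming SDHCI_MAX_SEGS is 128 as defined in sdhci.h):
	 * the table then holds 128 * 2 + 1 = 257 descriptors.
	 */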
4054  	host->max_adma = 65536;
4055  
4056  	host->max_timeout_count = 0xE;
4057  
4058  	return host;
4059  }
4060  
4061  EXPORT_SYMBOL_GPL(sdhci_alloc_host);
4062  
4063  static int sdhci_set_dma_mask(struct sdhci_host *host)
4064  {
4065  	struct mmc_host *mmc = host->mmc;
4066  	struct device *dev = mmc_dev(mmc);
4067  	int ret = -EINVAL;
4068  
4069  	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
4070  		host->flags &= ~SDHCI_USE_64_BIT_DMA;
4071  
4072  	/* Try 64-bit mask if hardware is capable of it */
4073  	if (host->flags & SDHCI_USE_64_BIT_DMA) {
4074  		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4075  		if (ret) {
4076  			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
4077  				mmc_hostname(mmc));
4078  			host->flags &= ~SDHCI_USE_64_BIT_DMA;
4079  		}
4080  	}
4081  
4082  	/* 32-bit mask as default & fallback */
4083  	if (ret) {
4084  		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
4085  		if (ret)
4086  			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
4087  				mmc_hostname(mmc));
4088  	}
4089  
4090  	return ret;
4091  }
4092  
4093  void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
4094  		       const u32 *caps, const u32 *caps1)
4095  {
4096  	u16 v;
4097  	u64 dt_caps_mask = 0;
4098  	u64 dt_caps = 0;
4099  
4100  	if (host->read_caps)
4101  		return;
4102  
4103  	host->read_caps = true;
4104  
4105  	if (debug_quirks)
4106  		host->quirks = debug_quirks;
4107  
4108  	if (debug_quirks2)
4109  		host->quirks2 = debug_quirks2;
4110  
4111  	sdhci_reset_for_all(host);
4112  
4113  	if (host->v4_mode)
4114  		sdhci_do_enable_v4_mode(host);
4115  
4116  	device_property_read_u64(mmc_dev(host->mmc),
4117  				 "sdhci-caps-mask", &dt_caps_mask);
4118  	device_property_read_u64(mmc_dev(host->mmc),
4119  				 "sdhci-caps", &dt_caps);
4120  
4121  	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4122  	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4123  
4124  	if (caps) {
4125  		host->caps = *caps;
4126  	} else {
4127  		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4128  		host->caps &= ~lower_32_bits(dt_caps_mask);
4129  		host->caps |= lower_32_bits(dt_caps);
4130  	}
4131  
4132  	if (host->version < SDHCI_SPEC_300)
4133  		return;
4134  
4135  	if (caps1) {
4136  		host->caps1 = *caps1;
4137  	} else {
4138  		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4139  		host->caps1 &= ~upper_32_bits(dt_caps_mask);
4140  		host->caps1 |= upper_32_bits(dt_caps);
4141  	}
4142  }
4143  EXPORT_SYMBOL_GPL(__sdhci_read_caps);
4144  
4145  static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4146  {
4147  	struct mmc_host *mmc = host->mmc;
4148  	unsigned int max_blocks;
4149  	unsigned int bounce_size;
4150  	int ret;
4151  
4152  	/*
4153  	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4154  	 * has diminishing returns; this is probably because SD/MMC
4155  	 * cards are usually optimized to handle requests of this size.
4156  	 */
4157  	bounce_size = SZ_64K;
4158  	/*
4159  	 * Adjust downwards to maximum request size if this is less
4160  	 * than our segment size, else hammer down the maximum
4161  	 * request size to the maximum buffer size.
4162  	 */
4163  	if (mmc->max_req_size < bounce_size)
4164  		bounce_size = mmc->max_req_size;
4165  	max_blocks = bounce_size / 512;
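	/*
	 * Illustration: with the default 64 KiB bounce buffer this yields
	 * 64 KiB / 512 = 128 blocks, which later becomes mmc->max_segs.
	 */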
4166  
4167  	/*
4168  	 * When we support just one segment, we can get significant
4169  	 * speedups with the help of a bounce buffer that groups scattered
4170  	 * reads/writes together.
4171  	 */
4172  	host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4173  					   bounce_size,
4174  					   GFP_KERNEL);
4175  	if (!host->bounce_buffer) {
4176  		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4177  		       mmc_hostname(mmc),
4178  		       bounce_size);
4179  		/*
4180  		 * Exiting with zero here makes sure we proceed with
4181  		 * mmc->max_segs == 1.
4182  		 */
4183  		return;
4184  	}
4185  
4186  	host->bounce_addr = dma_map_single(mmc_dev(mmc),
4187  					   host->bounce_buffer,
4188  					   bounce_size,
4189  					   DMA_BIDIRECTIONAL);
4190  	ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4191  	if (ret) {
4192  		devm_kfree(mmc_dev(mmc), host->bounce_buffer);
4193  		host->bounce_buffer = NULL;
4194  		/* Again fall back to max_segs == 1 */
4195  		return;
4196  	}
4197  
4198  	host->bounce_buffer_size = bounce_size;
4199  
4200  	/* Lie about this since we're bouncing */
4201  	mmc->max_segs = max_blocks;
4202  	mmc->max_seg_size = bounce_size;
4203  	mmc->max_req_size = bounce_size;
4204  
4205  	pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4206  		mmc_hostname(mmc), max_blocks, bounce_size);
4207  }
4208  
4209  static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4210  {
4211  	/*
4212  	 * According to SD Host Controller spec v4.10, bit[27] added from
4213  	 * version 4.10 in Capabilities Register is used as 64-bit System
4214  	 * Address support for V4 mode.
4215  	 */
4216  	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4217  		return host->caps & SDHCI_CAN_64BIT_V4;
4218  
4219  	return host->caps & SDHCI_CAN_64BIT;
4220  }
4221  
4222  int sdhci_setup_host(struct sdhci_host *host)
4223  {
4224  	struct mmc_host *mmc;
4225  	u32 max_current_caps;
4226  	unsigned int ocr_avail;
4227  	unsigned int override_timeout_clk;
4228  	u32 max_clk;
4229  	int ret = 0;
4230  	bool enable_vqmmc = false;
4231  
4232  	WARN_ON(host == NULL);
4233  	if (host == NULL)
4234  		return -EINVAL;
4235  
4236  	mmc = host->mmc;
4237  
4238  	/*
4239  	 * If there are external regulators, get them. Note this must be done
4240  	 * early before resetting the host and reading the capabilities so that
4241  	 * the host can take the appropriate action if regulators are not
4242  	 * available.
4243  	 */
4244  	if (!mmc->supply.vqmmc) {
4245  		ret = mmc_regulator_get_supply(mmc);
4246  		if (ret)
4247  			return ret;
4248  		enable_vqmmc = true;
4249  	}
4250  
4251  	DBG("Version:   0x%08x | Present:  0x%08x\n",
4252  	    sdhci_readw(host, SDHCI_HOST_VERSION),
4253  	    sdhci_readl(host, SDHCI_PRESENT_STATE));
4254  	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
4255  	    sdhci_readl(host, SDHCI_CAPABILITIES),
4256  	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
4257  
4258  	sdhci_read_caps(host);
4259  
4260  	override_timeout_clk = host->timeout_clk;
4261  
4262  	if (host->version > SDHCI_SPEC_420) {
4263  		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4264  		       mmc_hostname(mmc), host->version);
4265  	}
4266  
4267  	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4268  		host->flags |= SDHCI_USE_SDMA;
4269  	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4270  		DBG("Controller doesn't have SDMA capability\n");
4271  	else
4272  		host->flags |= SDHCI_USE_SDMA;
4273  
4274  	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4275  		(host->flags & SDHCI_USE_SDMA)) {
4276  		DBG("Disabling DMA as it is marked broken\n");
4277  		host->flags &= ~SDHCI_USE_SDMA;
4278  	}
4279  
4280  	if ((host->version >= SDHCI_SPEC_200) &&
4281  		(host->caps & SDHCI_CAN_DO_ADMA2))
4282  		host->flags |= SDHCI_USE_ADMA;
4283  
4284  	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4285  		(host->flags & SDHCI_USE_ADMA)) {
4286  		DBG("Disabling ADMA as it is marked broken\n");
4287  		host->flags &= ~SDHCI_USE_ADMA;
4288  	}
4289  
4290  	if (sdhci_can_64bit_dma(host))
4291  		host->flags |= SDHCI_USE_64_BIT_DMA;
4292  
4293  	if (host->use_external_dma) {
4294  		ret = sdhci_external_dma_init(host);
4295  		if (ret == -EPROBE_DEFER)
4296  			goto unreg;
4297  		/*
4298  		 * Fall back to use the DMA/PIO integrated in standard SDHCI
4299  		 * instead of external DMA devices.
4300  		 */
4301  		else if (ret)
4302  			sdhci_switch_external_dma(host, false);
4303  		/* Disable internal DMA sources */
4304  		else
4305  			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4306  	}
4307  
4308  	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4309  		if (host->ops->set_dma_mask)
4310  			ret = host->ops->set_dma_mask(host);
4311  		else
4312  			ret = sdhci_set_dma_mask(host);
4313  
4314  		if (!ret && host->ops->enable_dma)
4315  			ret = host->ops->enable_dma(host);
4316  
4317  		if (ret) {
4318  			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4319  				mmc_hostname(mmc));
4320  			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4321  
4322  			ret = 0;
4323  		}
4324  	}
4325  
4326  	/* SDMA does not support 64-bit DMA if v4 mode not set */
4327  	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4328  		host->flags &= ~SDHCI_USE_SDMA;
4329  
4330  	if (host->flags & SDHCI_USE_ADMA) {
4331  		dma_addr_t dma;
4332  		void *buf;
4333  
4334  		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4335  			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4336  		else if (!host->alloc_desc_sz)
4337  			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4338  
4339  		host->desc_sz = host->alloc_desc_sz;
4340  		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4341  
4342  		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4343  		/*
4344  		 * Use zalloc to zero the reserved high 32-bits of 128-bit
4345  		 * descriptors so that they never need to be written.
4346  		 */
4347  		buf = dma_alloc_coherent(mmc_dev(mmc),
4348  					 host->align_buffer_sz + host->adma_table_sz,
4349  					 &dma, GFP_KERNEL);
4350  		if (!buf) {
4351  			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4352  				mmc_hostname(mmc));
4353  			host->flags &= ~SDHCI_USE_ADMA;
4354  		} else if ((dma + host->align_buffer_sz) &
4355  			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4356  			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4357  				mmc_hostname(mmc));
4358  			host->flags &= ~SDHCI_USE_ADMA;
4359  			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4360  					  host->adma_table_sz, buf, dma);
4361  		} else {
4362  			host->align_buffer = buf;
4363  			host->align_addr = dma;
4364  
4365  			host->adma_table = buf + host->align_buffer_sz;
4366  			host->adma_addr = dma + host->align_buffer_sz;
4367  		}
4368  	}
4369  
4370  	/*
4371  	 * If we use DMA, then it's up to the caller to set the DMA
4372  	 * mask, but PIO does not need the hw shim so we set a new
4373  	 * mask here in that case.
4374  	 */
4375  	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4376  		host->dma_mask = DMA_BIT_MASK(64);
4377  		mmc_dev(mmc)->dma_mask = &host->dma_mask;
4378  	}
4379  
4380  	if (host->version >= SDHCI_SPEC_300)
4381  		host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4382  	else
4383  		host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4384  
4385  	host->max_clk *= 1000000;
4386  	if (host->max_clk == 0 || host->quirks &
4387  			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4388  		if (!host->ops->get_max_clock) {
4389  			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4390  			       mmc_hostname(mmc));
4391  			ret = -ENODEV;
4392  			goto undma;
4393  		}
4394  		host->max_clk = host->ops->get_max_clock(host);
4395  	}
4396  
4397  	/*
4398  	 * In case of Host Controller v3.00, find out whether clock
4399  	 * multiplier is supported.
4400  	 */
4401  	host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4402  
4403  	/*
4404  	 * In case the value in Clock Multiplier is 0, then programmable
4405  	 * clock mode is not supported, otherwise the actual clock
4406  	 * multiplier is one more than the value of Clock Multiplier
4407  	 * in the Capabilities Register.
4408  	 */
4409  	if (host->clk_mul)
4410  		host->clk_mul += 1;
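	/*
	 * Example: a Clock Multiplier field of 0x07 means the effective
	 * programmable clock multiplier is 8.
	 */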
4411  
4412  	/*
4413  	 * Set host parameters.
4414  	 */
4415  	max_clk = host->max_clk;
4416  
4417  	if (host->ops->get_min_clock)
4418  		mmc->f_min = host->ops->get_min_clock(host);
4419  	else if (host->version >= SDHCI_SPEC_300) {
4420  		if (host->clk_mul)
4421  			max_clk = host->max_clk * host->clk_mul;
4422  		/*
4423  		 * Divided Clock Mode minimum clock rate is always less than
4424  		 * Programmable Clock Mode minimum clock rate.
4425  		 */
4426  		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4427  	} else
4428  		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
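	/*
	 * Illustration (assuming SDHCI_MAX_DIV_SPEC_300 is 2046 and
	 * SDHCI_MAX_DIV_SPEC_200 is 256, as defined in sdhci.h): with a
	 * 50 MHz base clock, f_min is roughly 24 kHz on a v3.00 host and
	 * roughly 195 kHz on older hosts.
	 */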
4429  
4430  	if (!mmc->f_max || mmc->f_max > max_clk)
4431  		mmc->f_max = max_clk;
4432  
4433  	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4434  		host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4435  
4436  		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4437  			host->timeout_clk *= 1000;
4438  
4439  		if (host->timeout_clk == 0) {
4440  			if (!host->ops->get_timeout_clock) {
4441  				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4442  					mmc_hostname(mmc));
4443  				ret = -ENODEV;
4444  				goto undma;
4445  			}
4446  
4447  			host->timeout_clk =
4448  				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4449  					     1000);
4450  		}
4451  
4452  		if (override_timeout_clk)
4453  			host->timeout_clk = override_timeout_clk;
4454  
4455  		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4456  			host->ops->get_max_timeout_count(host) : 1 << 27;
4457  		mmc->max_busy_timeout /= host->timeout_clk;
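		/*
		 * Illustration: with the default 1 << 27 count and a 48 MHz
		 * timeout clock (timeout_clk == 48000 kHz), the maximum busy
		 * timeout works out to 134217728 / 48000, i.e. about 2796 ms.
		 */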
4458  	}
4459  
4460  	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4461  	    !host->ops->get_max_timeout_count)
4462  		mmc->max_busy_timeout = 0;
4463  
4464  	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4465  	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4466  
4467  	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4468  		host->flags |= SDHCI_AUTO_CMD12;
4469  
4470  	/*
4471  	 * For v3 mode, Auto-CMD23 stuff only works in ADMA or PIO.
4472  	 * For v4 mode, SDMA may use Auto-CMD23 as well.
4473  	 */
4474  	if ((host->version >= SDHCI_SPEC_300) &&
4475  	    ((host->flags & SDHCI_USE_ADMA) ||
4476  	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4477  	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4478  		host->flags |= SDHCI_AUTO_CMD23;
4479  		DBG("Auto-CMD23 available\n");
4480  	} else {
4481  		DBG("Auto-CMD23 unavailable\n");
4482  	}
4483  
4484  	/*
4485  	 * A controller may support 8-bit width, but the board itself
4486  	 * might not have the pins brought out.  Boards that support
4487  	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4488  	 * their platform code before calling sdhci_add_host(), and we
4489  	 * won't assume 8-bit width for hosts without that CAP.
4490  	 */
4491  	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4492  		mmc->caps |= MMC_CAP_4_BIT_DATA;
4493  
4494  	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4495  		mmc->caps &= ~MMC_CAP_CMD23;
4496  
4497  	if (host->caps & SDHCI_CAN_DO_HISPD)
4498  		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4499  
4500  	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4501  	    mmc_card_is_removable(mmc) &&
4502  	    mmc_gpio_get_cd(mmc) < 0)
4503  		mmc->caps |= MMC_CAP_NEEDS_POLL;
4504  
4505  	if (!IS_ERR(mmc->supply.vqmmc)) {
4506  		if (enable_vqmmc) {
4507  			ret = regulator_enable(mmc->supply.vqmmc);
4508  			host->sdhci_core_to_disable_vqmmc = !ret;
4509  		}
4510  
4511  		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
4512  		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4513  						    1950000))
4514  			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4515  					 SDHCI_SUPPORT_SDR50 |
4516  					 SDHCI_SUPPORT_DDR50);
4517  
4518  		/* In eMMC case vqmmc might be a fixed 1.8V regulator */
4519  		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4520  						    3600000))
4521  			host->flags &= ~SDHCI_SIGNALING_330;
4522  
4523  		if (ret) {
4524  			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4525  				mmc_hostname(mmc), ret);
4526  			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4527  		}
4528  
4529  	}
4530  
4531  	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4532  		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4533  				 SDHCI_SUPPORT_DDR50);
4534  		/*
4535  		 * The SDHCI controller in a SoC might support HS200/HS400
4536  		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4537  		 * but if the board is modeled such that the IO lines are not
4538  		 * connected to 1.8v then HS200/HS400 cannot be supported.
4539  		 * Disable HS200/HS400 if the board does not have 1.8v connected
4540  		 * to the IO lines. (The same applies to the other 1.8V modes.)
4541  		 */
4542  		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4543  		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4544  	}
4545  
4546  	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4547  	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4548  			   SDHCI_SUPPORT_DDR50))
4549  		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4550  
4551  	/* SDR104 support also implies SDR50 support */
4552  	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4553  		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4554  		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
4555  		 * field can be promoted to support HS200.
4556  		 */
4557  		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4558  			mmc->caps2 |= MMC_CAP2_HS200;
4559  	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4560  		mmc->caps |= MMC_CAP_UHS_SDR50;
4561  	}
4562  
4563  	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4564  	    (host->caps1 & SDHCI_SUPPORT_HS400))
4565  		mmc->caps2 |= MMC_CAP2_HS400;
4566  
4567  	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4568  	    (IS_ERR(mmc->supply.vqmmc) ||
4569  	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4570  					     1300000)))
4571  		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4572  
4573  	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4574  	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4575  		mmc->caps |= MMC_CAP_UHS_DDR50;
4576  
4577  	/* Does the host need tuning for SDR50? */
4578  	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4579  		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4580  
4581  	/* Driver Type(s) (A, C, D) supported by the host */
4582  	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4583  		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4584  	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4585  		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4586  	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4587  		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4588  
4589  	/* Initial value for re-tuning timer count */
4590  	host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4591  				       host->caps1);
4592  
4593  	/*
4594  	 * In case Re-tuning Timer is not disabled, the actual value of
4595  	 * re-tuning timer will be 2 ^ (n - 1).
4596  	 */
4597  	if (host->tuning_count)
4598  		host->tuning_count = 1 << (host->tuning_count - 1);
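	/*
	 * Example: a Re-Tuning Timer Count field of 4 gives a re-tuning
	 * interval of 1 << (4 - 1) = 8 seconds.
	 */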
4599  
4600  	/* Re-tuning mode supported by the Host Controller */
4601  	host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4602  
4603  	ocr_avail = 0;
4604  
4605  	/*
4606  	 * According to SD Host Controller spec v3.00, if the Host System
4607  	 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4608  	 * the value is meaningful only if Voltage Support in the Capabilities
4609  	 * register is set. The actual current value is 4 times the register
4610  	 * value.
4611  	 */
4612  	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4613  	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4614  		int curr = regulator_get_current_limit(mmc->supply.vmmc);
4615  		if (curr > 0) {
4616  
4617  			/* convert to SDHCI_MAX_CURRENT format */
4618  			curr = curr/1000;  /* convert to mA */
4619  			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4620  
4621  			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4622  			max_current_caps =
4623  				FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4624  				FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4625  				FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4626  		}
4627  	}
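	/*
	 * Worked example (illustrative, assuming SDHCI_MAX_CURRENT_MULTIPLIER
	 * is 4 mA per step): a regulator limit of 800000 uA becomes 800 mA,
	 * i.e. a register-format value of 200, which is then read back below
	 * as 200 * 4 = 800 mA for mmc->max_current_330.
	 */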
4628  
4629  	if (host->caps & SDHCI_CAN_VDD_330) {
4630  		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4631  
4632  		mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4633  						 max_current_caps) *
4634  						SDHCI_MAX_CURRENT_MULTIPLIER;
4635  	}
4636  	if (host->caps & SDHCI_CAN_VDD_300) {
4637  		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4638  
4639  		mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4640  						 max_current_caps) *
4641  						SDHCI_MAX_CURRENT_MULTIPLIER;
4642  	}
4643  	if (host->caps & SDHCI_CAN_VDD_180) {
4644  		ocr_avail |= MMC_VDD_165_195;
4645  
4646  		mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4647  						 max_current_caps) *
4648  						SDHCI_MAX_CURRENT_MULTIPLIER;
4649  	}
4650  
4651  	/* If OCR set by host, use it instead. */
4652  	if (host->ocr_mask)
4653  		ocr_avail = host->ocr_mask;
4654  
4655  	/* If OCR set by external regulators, give it highest prio. */
4656  	if (mmc->ocr_avail)
4657  		ocr_avail = mmc->ocr_avail;
4658  
4659  	mmc->ocr_avail = ocr_avail;
4660  	mmc->ocr_avail_sdio = ocr_avail;
4661  	if (host->ocr_avail_sdio)
4662  		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4663  	mmc->ocr_avail_sd = ocr_avail;
4664  	if (host->ocr_avail_sd)
4665  		mmc->ocr_avail_sd &= host->ocr_avail_sd;
4666  	else /* normal SD controllers don't support 1.8V */
4667  		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4668  	mmc->ocr_avail_mmc = ocr_avail;
4669  	if (host->ocr_avail_mmc)
4670  		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4671  
4672  	if (mmc->ocr_avail == 0) {
4673  		pr_err("%s: Hardware doesn't report any supported voltages.\n",
4674  		       mmc_hostname(mmc));
4675  		ret = -ENODEV;
4676  		goto unreg;
4677  	}
4678  
4679  	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4680  			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4681  			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4682  	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4683  		host->flags |= SDHCI_SIGNALING_180;
4684  
4685  	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4686  		host->flags |= SDHCI_SIGNALING_120;
4687  
4688  	spin_lock_init(&host->lock);
4689  
4690  	/*
4691  	 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4692  	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4693  	 * is less anyway.
4694  	 */
4695  	mmc->max_req_size = 524288;
4696  
4697  	/*
4698  	 * Maximum number of segments. Depends on if the hardware
4699  	 * can do scatter/gather or not.
4700  	 */
4701  	if (host->flags & SDHCI_USE_ADMA) {
4702  		mmc->max_segs = SDHCI_MAX_SEGS;
4703  	} else if (host->flags & SDHCI_USE_SDMA) {
4704  		mmc->max_segs = 1;
4705  		mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4706  					  dma_max_mapping_size(mmc_dev(mmc)));
4707  	} else { /* PIO */
4708  		mmc->max_segs = SDHCI_MAX_SEGS;
4709  	}
4710  
4711  	/*
4712  	 * Maximum segment size. Could be one segment with the maximum number
4713  	 * of bytes. When doing hardware scatter/gather, each entry cannot
4714  	 * be larger than 64 KiB though.
4715  	 */
4716  	if (host->flags & SDHCI_USE_ADMA) {
4717  		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
4718  			host->max_adma = 65532; /* 32-bit alignment */
4719  			mmc->max_seg_size = 65535;
4720  		} else {
4721  			mmc->max_seg_size = 65536;
4722  		}
4723  	} else {
4724  		mmc->max_seg_size = mmc->max_req_size;
4725  	}
4726  
4727  	/*
4728  	 * Maximum block size. This varies from controller to controller and
4729  	 * is specified in the capabilities register.
4730  	 */
4731  	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4732  		mmc->max_blk_size = 2;
4733  	} else {
4734  		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4735  				SDHCI_MAX_BLOCK_SHIFT;
4736  		if (mmc->max_blk_size >= 3) {
4737  			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4738  				mmc_hostname(mmc));
4739  			mmc->max_blk_size = 0;
4740  		}
4741  	}
4742  
4743  	mmc->max_blk_size = 512 << mmc->max_blk_size;
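	/*
	 * Example: a capabilities field value of 0 gives 512-byte blocks,
	 * 1 gives 1024 and 2 gives 2048; values of 3 or more were forced
	 * back to 512 above.
	 */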
4744  
4745  	/*
4746  	 * Maximum block count.
4747  	 */
4748  	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4749  
4750  	if (mmc->max_segs == 1)
4751  		/* This may alter mmc->*_blk_* parameters */
4752  		sdhci_allocate_bounce_buffer(host);
4753  
4754  	return 0;
4755  
4756  unreg:
4757  	if (host->sdhci_core_to_disable_vqmmc)
4758  		regulator_disable(mmc->supply.vqmmc);
4759  undma:
4760  	if (host->align_buffer)
4761  		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4762  				  host->adma_table_sz, host->align_buffer,
4763  				  host->align_addr);
4764  	host->adma_table = NULL;
4765  	host->align_buffer = NULL;
4766  
4767  	return ret;
4768  }
4769  EXPORT_SYMBOL_GPL(sdhci_setup_host);
4770  
4771  void sdhci_cleanup_host(struct sdhci_host *host)
4772  {
4773  	struct mmc_host *mmc = host->mmc;
4774  
4775  	if (host->sdhci_core_to_disable_vqmmc)
4776  		regulator_disable(mmc->supply.vqmmc);
4777  
4778  	if (host->align_buffer)
4779  		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4780  				  host->adma_table_sz, host->align_buffer,
4781  				  host->align_addr);
4782  
4783  	if (host->use_external_dma)
4784  		sdhci_external_dma_release(host);
4785  
4786  	host->adma_table = NULL;
4787  	host->align_buffer = NULL;
4788  }
4789  EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4790  
4791  int __sdhci_add_host(struct sdhci_host *host)
4792  {
4793  	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4794  	struct mmc_host *mmc = host->mmc;
4795  	int ret;
4796  
4797  	if ((mmc->caps2 & MMC_CAP2_CQE) &&
4798  	    (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4799  		mmc->caps2 &= ~MMC_CAP2_CQE;
4800  		mmc->cqe_ops = NULL;
4801  	}
4802  
4803  	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4804  	if (!host->complete_wq)
4805  		return -ENOMEM;
4806  
4807  	INIT_WORK(&host->complete_work, sdhci_complete_work);
4808  
4809  	timer_setup(&host->timer, sdhci_timeout_timer, 0);
4810  	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4811  
4812  	init_waitqueue_head(&host->buf_ready_int);
4813  
4814  	sdhci_init(host, 0);
4815  
4816  	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4817  				   IRQF_SHARED,	mmc_hostname(mmc), host);
4818  	if (ret) {
4819  		pr_err("%s: Failed to request IRQ %d: %d\n",
4820  		       mmc_hostname(mmc), host->irq, ret);
4821  		goto unwq;
4822  	}
4823  
4824  	ret = sdhci_led_register(host);
4825  	if (ret) {
4826  		pr_err("%s: Failed to register LED device: %d\n",
4827  		       mmc_hostname(mmc), ret);
4828  		goto unirq;
4829  	}
4830  
4831  	ret = mmc_add_host(mmc);
4832  	if (ret)
4833  		goto unled;
4834  
4835  	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4836  		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4837  		host->use_external_dma ? "External DMA" :
4838  		(host->flags & SDHCI_USE_ADMA) ?
4839  		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4840  		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4841  
4842  	sdhci_enable_card_detection(host);
4843  
4844  	return 0;
4845  
4846  unled:
4847  	sdhci_led_unregister(host);
4848  unirq:
4849  	sdhci_reset_for_all(host);
4850  	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4851  	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4852  	free_irq(host->irq, host);
4853  unwq:
4854  	destroy_workqueue(host->complete_wq);
4855  
4856  	return ret;
4857  }
4858  EXPORT_SYMBOL_GPL(__sdhci_add_host);
4859  
4860  int sdhci_add_host(struct sdhci_host *host)
4861  {
4862  	int ret;
4863  
4864  	ret = sdhci_setup_host(host);
4865  	if (ret)
4866  		return ret;
4867  
4868  	ret = __sdhci_add_host(host);
4869  	if (ret)
4870  		goto cleanup;
4871  
4872  	return 0;
4873  
4874  cleanup:
4875  	sdhci_cleanup_host(host);
4876  
4877  	return ret;
4878  }
4879  EXPORT_SYMBOL_GPL(sdhci_add_host);
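
/*
 * Hedged usage sketch (assumptions, not taken from this file): a typical glue
 * driver probe path looks roughly like
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	host->ioaddr = ...;
 *	host->irq = ...;
 *	host->ops = &foo_sdhci_ops;
 *	ret = sdhci_add_host(host);
 *
 * with the remove path calling sdhci_remove_host(host, dead) and finally
 * sdhci_free_host(host).  "foo_sdhci_ops" is a hypothetical name used only
 * for illustration.
 */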
4880  
4881  void sdhci_remove_host(struct sdhci_host *host, int dead)
4882  {
4883  	struct mmc_host *mmc = host->mmc;
4884  	unsigned long flags;
4885  
4886  	if (dead) {
4887  		spin_lock_irqsave(&host->lock, flags);
4888  
4889  		host->flags |= SDHCI_DEVICE_DEAD;
4890  
4891  		if (sdhci_has_requests(host)) {
4892  			pr_err("%s: Controller removed during transfer!\n",
4893  			       mmc_hostname(mmc));
4894  			sdhci_error_out_mrqs(host, -ENOMEDIUM);
4895  		}
4896  
4897  		spin_unlock_irqrestore(&host->lock, flags);
4898  	}
4899  
4900  	sdhci_disable_card_detection(host);
4901  
4902  	mmc_remove_host(mmc);
4903  
4904  	sdhci_led_unregister(host);
4905  
4906  	if (!dead)
4907  		sdhci_reset_for_all(host);
4908  
4909  	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4910  	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4911  	free_irq(host->irq, host);
4912  
4913  	del_timer_sync(&host->timer);
4914  	del_timer_sync(&host->data_timer);
4915  
4916  	destroy_workqueue(host->complete_wq);
4917  
4918  	if (host->sdhci_core_to_disable_vqmmc)
4919  		regulator_disable(mmc->supply.vqmmc);
4920  
4921  	if (host->align_buffer)
4922  		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4923  				  host->adma_table_sz, host->align_buffer,
4924  				  host->align_addr);
4925  
4926  	if (host->use_external_dma)
4927  		sdhci_external_dma_release(host);
4928  
4929  	host->adma_table = NULL;
4930  	host->align_buffer = NULL;
4931  }
4932  
4933  EXPORT_SYMBOL_GPL(sdhci_remove_host);
4934  
4935  void sdhci_free_host(struct sdhci_host *host)
4936  {
4937  	mmc_free_host(host->mmc);
4938  }
4939  
4940  EXPORT_SYMBOL_GPL(sdhci_free_host);
4941  
4942  /*****************************************************************************\
4943   *                                                                           *
4944   * Driver init/exit                                                          *
4945   *                                                                           *
4946  \*****************************************************************************/
4947  
4948  static int __init sdhci_drv_init(void)
4949  {
4950  	pr_info(DRIVER_NAME
4951  		": Secure Digital Host Controller Interface driver\n");
4952  	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4953  
4954  	return 0;
4955  }
4956  
4957  static void __exit sdhci_drv_exit(void)
4958  {
4959  }
4960  
4961  module_init(sdhci_drv_init);
4962  module_exit(sdhci_drv_exit);
4963  
4964  module_param(debug_quirks, uint, 0444);
4965  module_param(debug_quirks2, uint, 0444);
4966  
4967  MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4968  MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4969  MODULE_LICENSE("GPL");
4970  
4971  MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4972  MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4973