// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/ata/sata_dwc_460ex.c
 *
 * Synopsys DesignWare Cores (DWC) SATA host driver
 *
 * Author: Mark Miesfeld <mmiesfeld@amcc.com>
 *
 * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
 * Copyright 2008 DENX Software Engineering
 *
 * Based on versions provided by AMCC and Synopsys which are:
 *          Copyright 2006 Applied Micro Circuits Corporation
 *          COPYRIGHT (C) 2005  SYNOPSYS, INC.  ALL RIGHTS RESERVED
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
#include <trace/events/libata.h>

#include "libata.h"

#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>

/* These two are defined in "libata.h" */
#undef	DRV_NAME
#undef	DRV_VERSION

#define DRV_NAME        "sata-dwc"
#define DRV_VERSION     "1.3"

#define sata_dwc_writel(a, v)	writel_relaxed(v, a)
#define sata_dwc_readl(a)	readl_relaxed(a)

#ifndef NO_IRQ
#define NO_IRQ		0
#endif

#define AHB_DMA_BRST_DFLT	64	/* 64 bytes = 16 32-bit data items per burst */

enum {
	SATA_DWC_MAX_PORTS = 1,

	SATA_DWC_SCR_OFFSET = 0x24,
	SATA_DWC_REG_OFFSET = 0x64,
};

/* DWC SATA Registers */
struct sata_dwc_regs {
	u32 fptagr;		/* 1st party DMA tag */
	u32 fpbor;		/* 1st party DMA buffer offset */
	u32 fptcr;		/* 1st party DMA transfer count */
	u32 dmacr;		/* DMA Control */
	u32 dbtsr;		/* DMA Burst Transaction size */
	u32 intpr;		/* Interrupt Pending */
	u32 intmr;		/* Interrupt Mask */
	u32 errmr;		/* Error Mask */
	u32 llcr;		/* Link Layer Control */
	u32 phycr;		/* PHY Control */
	u32 physr;		/* PHY Status */
	u32 rxbistpd;		/* Received BIST pattern def register */
	u32 rxbistpd1;		/* Received BIST data dword1 */
	u32 rxbistpd2;		/* Received BIST pattern data dword2 */
	u32 txbistpd;		/* Transmit BIST pattern def register */
	u32 txbistpd1;		/* Transmit BIST data dword1 */
	u32 txbistpd2;		/* Transmit BIST data dword2 */
	u32 bistcr;		/* BIST Control Register */
	u32 bistfctr;		/* BIST FIS Count Register */
	u32 bistsr;		/* BIST Status Register */
	u32 bistdecr;		/* BIST Dword Error count register */
	u32 res[15];		/* Reserved locations */
	u32 testr;		/* Test Register */
	u32 versionr;		/* Version Register */
	u32 idr;		/* ID Register */
	u32 unimpl[192];	/* Unimplemented */
	u32 dmadr[256];		/* FIFO Locations in DMA Mode */
};
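
/*
 * With the layout above, offsetof(struct sata_dwc_regs, dmadr) works out
 * to 0x39c (21 + 15 + 3 + 192 32-bit words precede it); sata_dwc_probe()
 * deliberately uses offsetof() rather than a hard-coded offset when it
 * derives the physical FIFO address handed to the DMA engine.
 */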

enum {
	SCR_SCONTROL_DET_ENABLE	=	0x00000001,
	SCR_SSTATUS_DET_PRESENT	=	0x00000001,
	SCR_SERROR_DIAG_X	=	0x04000000,
/* DWC SATA Register Operations */
	SATA_DWC_TXFIFO_DEPTH	=	0x01FF,
	SATA_DWC_RXFIFO_DEPTH	=	0x01FF,
	SATA_DWC_DMACR_TMOD_TXCHEN =	0x00000004,
	SATA_DWC_DMACR_TXCHEN	= (0x00000001 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_RXCHEN	= (0x00000002 | SATA_DWC_DMACR_TMOD_TXCHEN),
	SATA_DWC_DMACR_TXRXCH_CLEAR =	SATA_DWC_DMACR_TMOD_TXCHEN,
	SATA_DWC_INTPR_DMAT	=	0x00000001,
	SATA_DWC_INTPR_NEWFP	=	0x00000002,
	SATA_DWC_INTPR_PMABRT	=	0x00000004,
	SATA_DWC_INTPR_ERR	=	0x00000008,
	SATA_DWC_INTPR_NEWBIST	=	0x00000010,
	SATA_DWC_INTPR_IPF	=	0x10000000,
	SATA_DWC_INTMR_DMATM	=	0x00000001,
	SATA_DWC_INTMR_NEWFPM	=	0x00000002,
	SATA_DWC_INTMR_PMABRTM	=	0x00000004,
	SATA_DWC_INTMR_ERRM	=	0x00000008,
	SATA_DWC_INTMR_NEWBISTM	=	0x00000010,
	SATA_DWC_LLCR_SCRAMEN	=	0x00000001,
	SATA_DWC_LLCR_DESCRAMEN	=	0x00000002,
	SATA_DWC_LLCR_RPDEN	=	0x00000004,
/* These are all the error bits; zeros are reserved fields. */
	SATA_DWC_SERROR_ERR_BITS =	0x0FFF0F03
};

#define SATA_DWC_SCR0_SPD_GET(v)	(((v) >> 4) & 0x0000000F)
#define SATA_DWC_DMACR_TX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_TXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DMACR_RX_CLEAR(v)	(((v) & ~SATA_DWC_DMACR_RXCHEN) |\
						 SATA_DWC_DMACR_TMOD_TXCHEN)
#define SATA_DWC_DBTSR_MWR(size)	(((size)/4) & SATA_DWC_TXFIFO_DEPTH)
#define SATA_DWC_DBTSR_MRD(size)	((((size)/4) & SATA_DWC_RXFIFO_DEPTH)\
						 << 16)
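
/*
 * Worked example: with the default 64-byte burst, SATA_DWC_DBTSR_MWR(64)
 * is 64/4 = 16 = 0x10 (write burst in words, low half-word) and
 * SATA_DWC_DBTSR_MRD(64) is 0x10 << 16 (read burst, high half-word), so
 * the DBTSR writes in sata_dwc_port_start() and sata_dwc_hardreset()
 * program the register to 0x00100010.
 */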

struct sata_dwc_device {
	struct device		*dev;		/* generic device struct */
	struct ata_probe_ent	*pe;		/* ptr to probe-ent */
	struct ata_host		*host;
	struct sata_dwc_regs __iomem *sata_dwc_regs;	/* DW SATA specific */
	u32			sactive_issued;
	u32			sactive_queued;
	struct phy		*phy;
	phys_addr_t		dmadr;
#ifdef CONFIG_SATA_DWC_OLD_DMA
	struct dw_dma_chip	*dma;
#endif
};

/*
 * Allow one extra slot in the command and DMA bookkeeping arrays to
 * account for libata's internal command.
 */
#define SATA_DWC_QCMD_MAX	(ATA_MAX_QUEUE + 1)

struct sata_dwc_device_port {
	struct sata_dwc_device	*hsdev;
	int			cmd_issued[SATA_DWC_QCMD_MAX];
	int			dma_pending[SATA_DWC_QCMD_MAX];

	/* DMA info */
	struct dma_chan			*chan;
	struct dma_async_tx_descriptor	*desc[SATA_DWC_QCMD_MAX];
	u32				dma_interrupt_count;
};

/*
 * Commonly used DWC SATA driver macros
 */
#define HSDEV_FROM_HOST(host)	((struct sata_dwc_device *)(host)->private_data)
#define HSDEV_FROM_AP(ap)	((struct sata_dwc_device *)(ap)->host->private_data)
#define HSDEVP_FROM_AP(ap)	((struct sata_dwc_device_port *)(ap)->private_data)
#define HSDEV_FROM_QC(qc)	((struct sata_dwc_device *)(qc)->ap->host->private_data)
#define HSDEV_FROM_HSDEVP(p)	((struct sata_dwc_device *)(p)->hsdev)

enum {
	SATA_DWC_CMD_ISSUED_NOT		= 0,
	SATA_DWC_CMD_ISSUED_PEND	= 1,
	SATA_DWC_CMD_ISSUED_EXEC	= 2,
	SATA_DWC_CMD_ISSUED_NODATA	= 3,

	SATA_DWC_DMA_PENDING_NONE	= 0,
	SATA_DWC_DMA_PENDING_TX		= 1,
	SATA_DWC_DMA_PENDING_RX		= 2,
};

/*
 * Prototypes
 */
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);

#ifdef CONFIG_SATA_DWC_OLD_DMA

#include <linux/platform_data/dma-dw.h>
#include <linux/dma/dw.h>

static struct dw_dma_slave sata_dwc_dma_dws = {
	.src_id = 0,
	.dst_id = 0,
	.m_master = 1,
	.p_master = 0,
};
static bool sata_dwc_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;

	if (dws->dma_dev != chan->device->dev)
		return false;

	chan->private = dws;
	return true;
}

static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct dw_dma_slave *dws = &sata_dwc_dma_dws;
	struct device *dev = hsdev->dev;
	dma_cap_mask_t mask;

	dws->dma_dev = dev;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Acquire DMA channel */
	hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
	if (!hsdevp->chan) {
		dev_err(dev, "%s: dma channel unavailable\n", __func__);
		return -EAGAIN;
	}

	return 0;
}

static int sata_dwc_dma_init_old(struct platform_device *pdev,
				 struct sata_dwc_device *hsdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;

	hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
	if (!hsdev->dma)
		return -ENOMEM;

	hsdev->dma->dev = dev;
	hsdev->dma->id = pdev->id;

	/* Get SATA DMA interrupt number */
	hsdev->dma->irq = irq_of_parse_and_map(np, 1);
	if (hsdev->dma->irq == NO_IRQ) {
		dev_err(dev, "no SATA DMA irq\n");
		return -ENODEV;
	}

	/* Get physical SATA DMA register base address */
	hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(hsdev->dma->regs))
		return PTR_ERR(hsdev->dma->regs);

	/* Initialize AHB DMAC */
	return dw_dma_probe(hsdev->dma);
}

static void sata_dwc_dma_exit_old(struct sata_dwc_device *hsdev)
{
	if (!hsdev->dma)
		return;

	dw_dma_remove(hsdev->dma);
}

#endif

static const char *get_prot_descript(u8 protocol)
{
	switch (protocol) {
	case ATA_PROT_NODATA:
		return "ATA no data";
	case ATA_PROT_PIO:
		return "ATA PIO";
	case ATA_PROT_DMA:
		return "ATA DMA";
	case ATA_PROT_NCQ:
		return "ATA NCQ";
	case ATA_PROT_NCQ_NODATA:
		return "ATA NCQ no data";
	case ATAPI_PROT_NODATA:
		return "ATAPI no data";
	case ATAPI_PROT_PIO:
		return "ATAPI PIO";
	case ATAPI_PROT_DMA:
		return "ATAPI DMA";
	default:
		return "unknown";
	}
}

static void dma_dwc_xfer_done(void *hsdev_instance)
{
	unsigned long flags;
	struct sata_dwc_device *hsdev = hsdev_instance;
	struct ata_host *host = hsdev->host;
	struct ata_port *ap;
	struct sata_dwc_device_port *hsdevp;
	u8 tag = 0;
	unsigned int port = 0;

	spin_lock_irqsave(&host->lock, flags);
	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);
	tag = ap->link.active_tag;

	/*
	 * Each DMA command produces 2 interrupts.  Only
	 * complete the command after both interrupts have been
	 * seen. (See sata_dwc_isr())
	 */
	hsdevp->dma_interrupt_count++;
	sata_dwc_clear_dmacr(hsdevp, tag);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
		dev_err(ap->dev, "DMA not pending tag=0x%02x pending=%d\n",
			tag, hsdevp->dma_pending[tag]);
	}

	if ((hsdevp->dma_interrupt_count % 2) == 0)
		sata_dwc_dma_xfer_complete(ap);

	spin_unlock_irqrestore(&host->lock, flags);
}

static struct dma_async_tx_descriptor *dma_dwc_xfer_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct dma_slave_config sconf;
	struct dma_async_tx_descriptor *desc;

	/* Zero-initialize so no stale stack data reaches the DMA driver */
	memset(&sconf, 0, sizeof(sconf));

	if (qc->dma_dir == DMA_DEV_TO_MEM) {
		sconf.src_addr = hsdev->dmadr;
		sconf.device_fc = false;
	} else {	/* DMA_MEM_TO_DEV */
		sconf.dst_addr = hsdev->dmadr;
		sconf.device_fc = false;
	}

	sconf.direction = qc->dma_dir;
	sconf.src_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.dst_maxburst = AHB_DMA_BRST_DFLT / 4;	/* in items */
	sconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	dmaengine_slave_config(hsdevp->chan, &sconf);

	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
	desc = dmaengine_prep_slave_sg(hsdevp->chan, qc->sg, qc->n_elem,
				       qc->dma_dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

	if (!desc)
		return NULL;

	desc->callback = dma_dwc_xfer_done;
	desc->callback_param = hsdev;

	dev_dbg(hsdev->dev, "%s sg: 0x%p, count: %d addr: %pa\n", __func__,
		qc->sg, qc->n_elem, &hsdev->dmadr);

	return desc;
}
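
/*
 * The descriptor built above is stored in hsdevp->desc[tag] by
 * sata_dwc_qc_issue() and submitted to the engine later, from
 * sata_dwc_bmdma_start_by_tag(), once the device has accepted the
 * command; completion is then signalled through dma_dwc_xfer_done().
 */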

static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}

	*val = sata_dwc_readl(link->ap->ioaddr.scr_addr + (scr * 4));
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, *val);

	return 0;
}

static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n", __func__,
		link->ap->print_id, scr, val);
	if (scr > SCR_NOTIFICATION) {
		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
			__func__, scr);
		return -EINVAL;
	}
	sata_dwc_writel(link->ap->ioaddr.scr_addr + (scr * 4), val);

	return 0;
}

static void clear_serror(struct ata_port *ap)
{
	u32 val;

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &val);
	sata_dwc_scr_write(&ap->link, SCR_ERROR, val);
}

static void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
{
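	/*
	 * Note: the write-back below clears every interrupt currently
	 * pending in intpr, not just @bit; the parameter is unused here.
	 */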
	sata_dwc_writel(&hsdev->sata_dwc_regs->intpr,
			sata_dwc_readl(&hsdev->sata_dwc_regs->intpr));
}
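
/*
 * Map a queued-command tag to its bit in a 32-bit mask; for example,
 * tag 3 yields 0x00000008.  The "& 0x1f" simply keeps the shift within
 * a 32-bit word.
 */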
static u32 qcmd_tag_to_mask(u8 tag)
{
	return 0x00000001 << (tag & 0x1f);
}

/* See ahci.c */
static void sata_dwc_error_intr(struct ata_port *ap,
				struct sata_dwc_device *hsdev, uint intpr)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned int err_mask = 0, action = 0;
	struct ata_queued_cmd *qc;
	u32 serror;
	u8 status, tag;

	ata_ehi_clear_desc(ehi);

	sata_dwc_scr_read(&ap->link, SCR_ERROR, &serror);
	status = ap->ops->sff_check_status(ap);

	tag = ap->link.active_tag;

	dev_err(ap->dev,
		"%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x dma_intp=%d pending=%d issued=%d",
		__func__, serror, intpr, status, hsdevp->dma_interrupt_count,
		hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag]);

	/* Clear error register and interrupt bit */
	clear_serror(ap);
	clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);

	/* This is the only error happening now.  TODO check for exact error */

	err_mask |= AC_ERR_HOST_BUS;
	action |= ATA_EH_RESET;

	/* Pass this on to EH */
	ehi->serror |= serror;
	ehi->action |= action;

	qc = ata_qc_from_tag(ap, tag);
	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	ata_port_abort(ap);
}

/*
 * Function : sata_dwc_isr
 * arguments : int irq, void *dev_instance
 * Return value : irqreturn_t - status of IRQ
 * This interrupt handler is called via the port ops registered function:
 * .irq_handler = sata_dwc_isr
 */
static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
{
	struct ata_host *host = (struct ata_host *)dev_instance;
	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status, tag;
	int handled, num_processed, port = 0;
	uint intpr, sactive, sactive2, tag_mask;
	struct sata_dwc_device_port *hsdevp;

	hsdev->sactive_issued = 0;

	spin_lock_irqsave(&host->lock, flags);

	/* Read the interrupt register */
	intpr = sata_dwc_readl(&hsdev->sata_dwc_regs->intpr);

	ap = host->ports[port];
	hsdevp = HSDEVP_FROM_AP(ap);

	dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
		ap->link.active_tag);

	/* Check for error interrupt */
	if (intpr & SATA_DWC_INTPR_ERR) {
		sata_dwc_error_intr(ap, hsdev, intpr);
		handled = 1;
		goto DONE;
	}

	/* Check for DMA SETUP FIS (FP DMA) interrupt */
	if (intpr & SATA_DWC_INTPR_NEWFP) {
		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);

		tag = (u8)(sata_dwc_readl(&hsdev->sata_dwc_regs->fptagr));
		dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PEND)
			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);

		hsdev->sactive_issued |= qcmd_tag_to_mask(tag);

		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc\n");
			handled = 1;
			goto DONE;
		}
		/*
		 * Start FP DMA for NCQ command.  At this point the tag is the
		 * active tag.  It is the tag that matches the command about to
		 * be completed.
		 */
		trace_ata_bmdma_start(ap, &qc->tf, tag);
		qc->ap->link.active_tag = tag;
		sata_dwc_bmdma_start_by_tag(qc, tag);

		handled = 1;
		goto DONE;
	}

	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
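	/*
	 * Bits set in sactive_issued but already clear in SActive belong to
	 * commands that have completed.  Worked example: sactive_issued =
	 * 0x0f and sactive = 0x0a give tag_mask = (0x0f | 0x0a) ^ 0x0a =
	 * 0x05, i.e. tags 0 and 2 are done.
	 */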
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	/* If nothing was issued via SActive and tag_mask is zero, this is not NCQ */
	if (hsdev->sactive_issued == 0 && tag_mask == 0) {
		if (ap->link.active_tag == ATA_TAG_POISON)
			tag = 0;
		else
			tag = ap->link.active_tag;
		qc = ata_qc_from_tag(ap, tag);

		/* DEV interrupt w/ no active qc? */
		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
			dev_err(ap->dev,
				"%s interrupt with no active qc qc=%p\n",
				__func__, qc);
			ap->ops->sff_check_status(ap);
			handled = 1;
			goto DONE;
		}
		status = ap->ops->sff_check_status(ap);

		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
			__func__, get_prot_descript(qc->tf.protocol));
DRVSTILLBUSY:
		if (ata_is_dma(qc->tf.protocol)) {
			/*
			 * Each DMA transaction produces 2 interrupts: the
			 * DMAC transfer-complete interrupt and the SATA
			 * controller operation-done interrupt.  The command
			 * should be completed only after both interrupts are
			 * seen.
			 */
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE) {
				dev_err(ap->dev,
					"%s: DMA not pending intpr=0x%08x status=0x%08x pending=%d\n",
					__func__, intpr, status,
					hsdevp->dma_pending[tag]);
			}

			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else if (ata_is_pio(qc->tf.protocol)) {
			ata_sff_hsm_move(ap, qc, status, 0);
			handled = 1;
			goto DONE;
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto DRVSTILLBUSY;
		}

		handled = 1;
		goto DONE;
	}

	/*
	 * This is an NCQ command.  At this point we need to figure out for
	 * which tags we have gotten a completion interrupt.  One interrupt
	 * may serve as completion for more than one operation when commands
	 * are queued (NCQ).  We need to process each completed command.
	 */

	/* Process completed commands */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
	tag_mask = (hsdev->sactive_issued | sactive) ^ sactive;

	if (sactive != 0 || hsdev->sactive_issued > 1 || tag_mask > 1) {
		dev_dbg(ap->dev,
			"%s NCQ:sactive=0x%08x  sactive_issued=0x%08x tag_mask=0x%08x\n",
			__func__, sactive, hsdev->sactive_issued, tag_mask);
	}

	if ((tag_mask | hsdev->sactive_issued) != hsdev->sactive_issued) {
		dev_warn(ap->dev,
			 "Bad tag mask?  sactive=0x%08x sactive_issued=0x%08x  tag_mask=0x%08x\n",
			 sactive, hsdev->sactive_issued, tag_mask);
	}

	/* Read just to clear ... not bad if currently still busy */
	status = ap->ops->sff_check_status(ap);
	dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);

	tag = 0;
	num_processed = 0;
	while (tag_mask) {
		num_processed++;
		while (!(tag_mask & 0x00000001)) {
			tag++;
			tag_mask <<= 1;
		}

		tag_mask &= (~0x00000001);
		qc = ata_qc_from_tag(ap, tag);
		if (unlikely(!qc)) {
			dev_err(ap->dev, "failed to get qc\n");
			handled = 1;
			goto DONE;
		}

		/* To be picked up by completion functions */
		qc->ap->link.active_tag = tag;
		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;

		/* Let libata/scsi layers handle error */
		if (status & ATA_ERR) {
			dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
				status);
			sata_dwc_qc_complete(ap, qc);
			handled = 1;
			goto DONE;
		}

		/* Process completed command */
		dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
			get_prot_descript(qc->tf.protocol));
		if (ata_is_dma(qc->tf.protocol)) {
			hsdevp->dma_interrupt_count++;
			if (hsdevp->dma_pending[tag] ==
					SATA_DWC_DMA_PENDING_NONE)
				dev_warn(ap->dev, "%s: DMA not pending?\n",
					__func__);
			if ((hsdevp->dma_interrupt_count % 2) == 0)
				sata_dwc_dma_xfer_complete(ap);
		} else {
			if (unlikely(sata_dwc_qc_complete(ap, qc)))
				goto STILLBUSY;
		}
		continue;

STILLBUSY:
		ap->stats.idle_irq++;
		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
			ap->print_id);
	} /* while tag_mask */

	/*
	 * Check to see if any commands completed while we were processing our
	 * initial set of completed commands (reading status clears interrupts,
	 * so we might miss a completed command interrupt if one came in while
	 * we were processing -- we read status as part of processing a
	 * completed command).
	 */
	sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive2);
	if (sactive2 != sactive) {
		dev_dbg(ap->dev,
			"More completed - sactive=0x%x sactive2=0x%x\n",
			sactive, sactive2);
	}
	handled = 1;

DONE:
	spin_unlock_irqrestore(&host->lock, flags);
	return IRQ_RETVAL(handled);
}

static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
	u32 dmacr = sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr);

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
		dmacr = SATA_DWC_DMACR_RX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
		dmacr = SATA_DWC_DMACR_TX_CLEAR(dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr, dmacr);
	} else {
		/*
		 * This should not happen, it indicates the driver is out of
		 * sync.  If it does happen, clear dmacr anyway.
		 */
		dev_err(hsdev->dev,
			"%s DMA protocol RX and TX DMA not pending tag=0x%02x pending=%d dmacr: 0x%08x\n",
			__func__, tag, hsdevp->dma_pending[tag], dmacr);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);
	}
}

static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	u8 tag = 0;

	tag = ap->link.active_tag;
	qc = ata_qc_from_tag(ap, tag);
	if (!qc) {
		dev_err(ap->dev, "failed to get qc\n");
		return;
	}

	if (ata_is_dma(qc->tf.protocol)) {
		if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
			dev_err(ap->dev,
				"%s DMA protocol RX and TX DMA not pending dmacr: 0x%08x\n",
				__func__,
				sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
		}

		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
		sata_dwc_qc_complete(ap, qc);
		ap->link.active_tag = ATA_TAG_POISON;
	} else {
		sata_dwc_qc_complete(ap, qc);
	}
}

static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	u8 status = 0;
	u32 mask = 0x0;
	u8 tag = qc->hw_tag;
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdev->sactive_queued = 0;

	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
		dev_err(ap->dev, "TX DMA PENDING\n");
	else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX)
		dev_err(ap->dev, "RX DMA PENDING\n");
	dev_dbg(ap->dev,
		"QC complete cmd=0x%02x status=0x%02x ata%u: protocol=%d\n",
		qc->tf.command, status, ap->print_id, qc->tf.protocol);

	/* Clear active bit */
	mask = (~(qcmd_tag_to_mask(tag)));
	hsdev->sactive_queued = hsdev->sactive_queued & mask;
	hsdev->sactive_issued = hsdev->sactive_issued & mask;
	ata_qc_complete(qc);
	return 0;
}

static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
{
	/* Enable selective interrupts by setting the interrupt mask register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->intmr,
			SATA_DWC_INTMR_ERRM |
			SATA_DWC_INTMR_NEWFPM |
			SATA_DWC_INTMR_PMABRTM |
			SATA_DWC_INTMR_DMATM);
	/*
	 * Unmask the error bits that should trigger an error interrupt by
	 * setting the error mask register.
	 */
	sata_dwc_writel(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);

	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n",
		__func__, sata_dwc_readl(&hsdev->sata_dwc_regs->intmr),
		sata_dwc_readl(&hsdev->sata_dwc_regs->errmr));
}

static void sata_dwc_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + 0x00;
	port->data_addr		= base + 0x00;

	port->error_addr	= base + 0x04;
	port->feature_addr	= base + 0x04;

	port->nsect_addr	= base + 0x08;

	port->lbal_addr		= base + 0x0c;
	port->lbam_addr		= base + 0x10;
	port->lbah_addr		= base + 0x14;

	port->device_addr	= base + 0x18;
	port->command_addr	= base + 0x1c;
	port->status_addr	= base + 0x1c;

	port->altstatus_addr	= base + 0x20;
	port->ctl_addr		= base + 0x20;
}
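
/*
 * Note the 4-byte stride: the DWC core exposes the taskfile registers on
 * a 32-bit bus, so consecutive registers sit at 0x00, 0x04, 0x08, ...
 * rather than at byte-consecutive addresses as on classic IDE.  The
 * shared offsets (data/cmd, error/feature, command/status, altstatus/ctl)
 * are the usual ATA read/write register pairs.
 */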

static int sata_dwc_dma_get_channel(struct sata_dwc_device_port *hsdevp)
{
	struct sata_dwc_device *hsdev = hsdevp->hsdev;
	struct device *dev = hsdev->dev;

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(dev->of_node, "dmas", NULL))
		return sata_dwc_dma_get_channel_old(hsdevp);
#endif

	hsdevp->chan = dma_request_chan(dev, "sata-dma");
	if (IS_ERR(hsdevp->chan)) {
		dev_err(dev, "failed to allocate dma channel: %ld\n",
			PTR_ERR(hsdevp->chan));
		return PTR_ERR(hsdevp->chan);
	}

	return 0;
}

/*
 * Function : sata_dwc_port_start
 * arguments : struct ata_port *ap
 * Return value : 0 on success, error code otherwise
 * This function allocates the scatter gather LLI table for AHB DMA
 */
static int sata_dwc_port_start(struct ata_port *ap)
{
	int err = 0;
	struct sata_dwc_device *hsdev;
	struct sata_dwc_device_port *hsdevp = NULL;
	struct device *pdev;
	int i;

	hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);

	hsdev->host = ap->host;
	pdev = ap->host->dev;
	if (!pdev) {
		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
		err = -ENODEV;
		goto CLEANUP;
	}

	/* Allocate Port Struct */
	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
	if (!hsdevp) {
		err = -ENOMEM;
		goto CLEANUP;
	}
	hsdevp->hsdev = hsdev;

	err = sata_dwc_dma_get_channel(hsdevp);
	if (err)
		goto CLEANUP_ALLOC;

	err = phy_power_on(hsdev->phy);
	if (err)
		goto CLEANUP_ALLOC;

	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;

	ap->bmdma_prd = NULL;	/* set these so libata doesn't use them */
	ap->bmdma_prd_dma = 0;

	if (ap->port_no == 0) {
		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
				SATA_DWC_DMACR_TXRXCH_CLEAR);

		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n",
			__func__);
		sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
				(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
				 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
	}

	/* Clear any error bits before libata starts issuing commands */
	clear_serror(ap);
	ap->private_data = hsdevp;
	dev_dbg(ap->dev, "%s: done\n", __func__);
	return 0;

CLEANUP_ALLOC:
	kfree(hsdevp);
CLEANUP:
	dev_dbg(ap->dev, "%s: fail. ap->id = %d\n", __func__, ap->print_id);
	return err;
}

static void sata_dwc_port_stop(struct ata_port *ap)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);

	dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);

	dmaengine_terminate_sync(hsdevp->chan);
	dma_release_channel(hsdevp->chan);
	phy_power_off(hsdev->phy);

	kfree(hsdevp);
	ap->private_data = NULL;
}

/*
 * Function : sata_dwc_exec_command_by_tag
 * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
 * Return value : None
 * This function keeps track of individual command tag IDs and calls
 * ata_sff_exec_command in libata
 */
static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
					 struct ata_taskfile *tf,
					 u8 tag, u32 cmd_issued)
{
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	hsdevp->cmd_issued[tag] = cmd_issued;

	/*
	 * Clear SError before executing a new command.
	 * sata_dwc_scr_write and read cannot be used here. Clearing the PM
	 * managed SError register for the disk needs to be done before the
	 * task file is loaded.
	 */
	clear_serror(ap);
	ata_sff_exec_command(ap, tf);
}

static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
				     SATA_DWC_CMD_ISSUED_PEND);
}

static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_setup_by_tag(qc, tag);
}

static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
{
	int start_dma;
	u32 reg;
	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
	struct dma_async_tx_descriptor *desc = hsdevp->desc[tag];
	int dir = qc->dma_dir;

	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
		start_dma = 1;
		if (dir == DMA_TO_DEVICE)
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
		else
			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
	} else {
		dev_err(ap->dev,
			"%s: Command not pending cmd_issued=%d (tag=%d) DMA NOT started\n",
			__func__, hsdevp->cmd_issued[tag], tag);
		start_dma = 0;
	}

	if (start_dma) {
		sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
		if (reg & SATA_DWC_SERROR_ERR_BITS) {
			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
				__func__, reg);
		}

		if (dir == DMA_TO_DEVICE)
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_TXCHEN);
		else
			sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
					SATA_DWC_DMACR_RXCHEN);

		/* Enable AHB DMA transfer on the specified channel */
		dmaengine_submit(desc);
		dma_async_issue_pending(hsdevp->chan);
	}
}

static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
	u8 tag = qc->hw_tag;

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	sata_dwc_bmdma_start_by_tag(qc, tag);
}

static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
{
	u32 sactive;
	u8 tag = qc->hw_tag;
	struct ata_port *ap = qc->ap;
	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);

	if (!ata_is_ncq(qc->tf.protocol))
		tag = 0;

	if (ata_is_dma(qc->tf.protocol)) {
		hsdevp->desc[tag] = dma_dwc_xfer_setup(qc);
		if (!hsdevp->desc[tag])
			return AC_ERR_SYSTEM;
	} else {
		hsdevp->desc[tag] = NULL;
	}

	if (ata_is_ncq(qc->tf.protocol)) {
		sata_dwc_scr_read(&ap->link, SCR_ACTIVE, &sactive);
		sactive |= (0x00000001 << tag);
		sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);
		trace_ata_exec_command(ap, &qc->tf, tag);
		sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
					     SATA_DWC_CMD_ISSUED_PEND);
	} else {
		return ata_bmdma_qc_issue(qc);
	}
	return 0;
}

static void sata_dwc_error_handler(struct ata_port *ap)
{
	ata_sff_error_handler(ap);
}

static int sata_dwc_hardreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
	int ret;

	ret = sata_sff_hardreset(link, class, deadline);

	sata_dwc_enable_interrupts(hsdev);

	/* Reconfigure the DMA control register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dmacr,
			SATA_DWC_DMACR_TXRXCH_CLEAR);

	/* Reconfigure the DMA Burst Transaction Size register */
	sata_dwc_writel(&hsdev->sata_dwc_regs->dbtsr,
			SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));

	return ret;
}

static void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
{
	/* SATA DWC is master only */
}

/*
 * scsi mid-layer and libata interface structures
 */
static struct scsi_host_template sata_dwc_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	/*
	 * test-only: Currently this driver doesn't handle NCQ
	 * correctly. We enable NCQ but set the queue depth to a
	 * max of 1. This will get fixed in a future release.
	 */
	.sg_tablesize		= LIBATA_MAX_PRD,
	/* .can_queue		= ATA_MAX_QUEUE, */
	/*
	 * Make sure an LLI block is not created that will span an 8K max FIS
	 * boundary. If the block spans such a FIS boundary, there is a chance
	 * that a DMA burst will cross that boundary -- this results in an
	 * error in the host controller.
	 */
	.dma_boundary		= 0x1fff /* ATA_DMA_BOUNDARY */,
};

static struct ata_port_operations sata_dwc_ops = {
	.inherits		= &ata_sff_port_ops,

	.error_handler		= sata_dwc_error_handler,
	.hardreset		= sata_dwc_hardreset,

	.qc_issue		= sata_dwc_qc_issue,

	.scr_read		= sata_dwc_scr_read,
	.scr_write		= sata_dwc_scr_write,

	.port_start		= sata_dwc_port_start,
	.port_stop		= sata_dwc_port_stop,

	.sff_dev_select		= sata_dwc_dev_select,

	.bmdma_setup		= sata_dwc_bmdma_setup,
	.bmdma_start		= sata_dwc_bmdma_start,
};

static const struct ata_port_info sata_dwc_port_info[] = {
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &sata_dwc_ops,
	},
};

static int sata_dwc_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = dev->of_node;
	struct sata_dwc_device *hsdev;
	u32 idr, versionr;
	char *ver = (char *)&versionr;
	void __iomem *base;
	int err = 0;
	int irq;
	struct ata_host *host;
	struct ata_port_info pi = sata_dwc_port_info[0];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct resource *res;

	/* Allocate DWC SATA device */
	host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
	hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
	if (!host || !hsdev)
		return -ENOMEM;

	host->private_data = hsdev;

	/* Ioremap SATA registers */
	base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	dev_dbg(dev, "ioremap done for SATA register address\n");

	/* Synopsys DWC SATA specific Registers */
	hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
	hsdev->dmadr = res->start + SATA_DWC_REG_OFFSET + offsetof(struct sata_dwc_regs, dmadr);

	/* Setup port */
	host->ports[0]->ioaddr.cmd_addr = base;
	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
	sata_dwc_setup_port(&host->ports[0]->ioaddr, base);

	/* Read the ID and Version Registers */
	idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
	versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
	dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);

	/* Save dev for later use in dev_xxx() routines */
	hsdev->dev = dev;

	/* Enable SATA Interrupts */
	sata_dwc_enable_interrupts(hsdev);

	/* Get SATA interrupt number */
	irq = irq_of_parse_and_map(np, 0);
	if (irq == NO_IRQ) {
		dev_err(dev, "no SATA irq\n");
		return -ENODEV;
	}

#ifdef CONFIG_SATA_DWC_OLD_DMA
	if (!of_find_property(np, "dmas", NULL)) {
		err = sata_dwc_dma_init_old(ofdev, hsdev);
		if (err)
			return err;
	}
#endif

	hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
	if (IS_ERR(hsdev->phy))
		return PTR_ERR(hsdev->phy);

	err = phy_init(hsdev->phy);
	if (err)
		goto error_out;

	/*
	 * Now register with the libATA core. This will also initiate the
	 * device discovery process, invoking our port_start() handler and
	 * error_handler() to execute a dummy Softreset EH session.
	 */
	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
	if (err)
		dev_err(dev, "failed to activate host\n");

	return 0;

error_out:
	phy_exit(hsdev->phy);
	return err;
}

static int sata_dwc_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);
	struct sata_dwc_device *hsdev = host->private_data;

	ata_host_detach(host);

	phy_exit(hsdev->phy);

#ifdef CONFIG_SATA_DWC_OLD_DMA
	/* Free SATA DMA resources */
	sata_dwc_dma_exit_old(hsdev);
#endif

	dev_dbg(dev, "done\n");
	return 0;
}

static const struct of_device_id sata_dwc_match[] = {
	{ .compatible = "amcc,sata-460ex", },
	{}
};
MODULE_DEVICE_TABLE(of, sata_dwc_match);

static struct platform_driver sata_dwc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = sata_dwc_match,
	},
	.probe = sata_dwc_probe,
	.remove = sata_dwc_remove,
};

module_platform_driver(sata_dwc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
MODULE_DESCRIPTION("DesignWare Cores SATA controller low level driver");
MODULE_VERSION(DRV_VERSION);