1  /*
2   * Marvell 88SE64xx/88SE94xx main function
3   *
4   * Copyright 2007 Red Hat, Inc.
5   * Copyright 2008 Marvell. <kewei@marvell.com>
6   * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
7   *
8   * This file is licensed under GPLv2.
9   *
10   * This program is free software; you can redistribute it and/or
11   * modify it under the terms of the GNU General Public License as
12   * published by the Free Software Foundation; version 2 of the
13   * License.
14   *
15   * This program is distributed in the hope that it will be useful,
16   * but WITHOUT ANY WARRANTY; without even the implied warranty of
17   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18   * General Public License for more details.
19   *
20   * You should have received a copy of the GNU General Public License
21   * along with this program; if not, write to the Free Software
22   * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
23   * USA
24  */
25  
26  #include "mv_sas.h"
27  
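/*
 * Look up the hardware slot tag of an in-flight sas_task: mvs_task_prep()
 * stashes the slot (struct mvs_slot_info) in task->lldd_task, and the tag
 * lives in slot->slot_tag. Returns 1 and fills *tag on success, 0 if the
 * task never reached a slot.
 */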
28  static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
29  {
30  	if (task->lldd_task) {
31  		struct mvs_slot_info *slot;
32  		slot = task->lldd_task;
33  		*tag = slot->slot_tag;
34  		return 1;
35  	}
36  	return 0;
37  }
38  
39  void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
40  {
41  	void *bitmap = mvi->tags;
42  	clear_bit(tag, bitmap);
43  }
44  
45  void mvs_tag_free(struct mvs_info *mvi, u32 tag)
46  {
47  	mvs_tag_clear(mvi, tag);
48  }
49  
50  void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
51  {
52  	void *bitmap = mvi->tags;
53  	set_bit(tag, bitmap);
54  }
55  
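/*
 * Tags come from the mvi->tags bitmap. find_first_zero_bit() followed by
 * set_bit() is not atomic on its own; callers serialize through mvi->lock,
 * e.g. (a sketch of the pattern mvs_task_exec() uses around mvs_task_prep()):
 *
 *	spin_lock_irqsave(&mvi->lock, flags);
 *	rc = mvs_tag_alloc(mvi, &tag);
 *	spin_unlock_irqrestore(&mvi->lock, flags);
 */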
56  inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
57  {
58  	unsigned int index, tag;
59  	void *bitmap = mvi->tags;
60  
61  	index = find_first_zero_bit(bitmap, mvi->tags_num);
62  	tag = index;
63  	if (tag >= mvi->tags_num)
64  		return -SAS_QUEUE_FULL;
65  	mvs_tag_set(mvi, tag);
66  	*tag_out = tag;
67  	return 0;
68  }
69  
70  void mvs_tag_init(struct mvs_info *mvi)
71  {
72  	int i;
73  	for (i = 0; i < mvi->tags_num; ++i)
74  		mvs_tag_clear(mvi, i);
75  }
76  
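/*
 * Map a domain_device back to the mvs_info that owns it: locate the first
 * phy of the device's port in the host-wide phy table, then divide the phy
 * index by the per-controller phy count to select the controller instance.
 */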
77  static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
78  {
79  	unsigned long i = 0, j = 0, hi = 0;
80  	struct sas_ha_struct *sha = dev->port->ha;
81  	struct mvs_info *mvi = NULL;
82  	struct asd_sas_phy *phy;
83  
84  	while (sha->sas_port[i]) {
85  		if (sha->sas_port[i] == dev->port) {
86  			phy = container_of(sha->sas_port[i]->phy_list.next,
87  				struct asd_sas_phy, port_phy_el);
88  			j = 0;
89  			while (sha->sas_phy[j]) {
90  				if (sha->sas_phy[j] == phy)
91  					break;
92  				j++;
93  			}
94  			break;
95  		}
96  		i++;
97  	}
98  	hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
99  	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
100  
101  	return mvi;
102  
103  }
104  
105  static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
106  {
107  	unsigned long i = 0, j = 0, n = 0, num = 0;
108  	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
109  	struct mvs_info *mvi = mvi_dev->mvi_info;
110  	struct sas_ha_struct *sha = dev->port->ha;
111  
112  	while (sha->sas_port[i]) {
113  		if (sha->sas_port[i] == dev->port) {
114  			struct asd_sas_phy *phy;
115  			list_for_each_entry(phy,
116  				&sha->sas_port[i]->phy_list, port_phy_el) {
117  				j = 0;
118  				while (sha->sas_phy[j]) {
119  					if (sha->sas_phy[j] == phy)
120  						break;
121  					j++;
122  				}
123  				phyno[n] = (j >= mvi->chip->n_phy) ?
124  					(j - mvi->chip->n_phy) : j;
125  				num++;
126  				n++;
127  			}
128  			break;
129  		}
130  		i++;
131  	}
132  	return num;
133  }
134  
135  struct mvs_device *mvs_find_dev_by_reg_set(struct mvs_info *mvi,
136  						u8 reg_set)
137  {
138  	u32 dev_no;
139  	for (dev_no = 0; dev_no < MVS_MAX_DEVICES; dev_no++) {
140  		if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED)
141  			continue;
142  
143  		if (mvi->devices[dev_no].taskfileset == reg_set)
144  			return &mvi->devices[dev_no];
145  	}
146  	return NULL;
147  }
148  
149  static inline void mvs_free_reg_set(struct mvs_info *mvi,
150  				struct mvs_device *dev)
151  {
152  	if (!dev) {
153  		mv_printk("device has already been freed.\n");
154  		return;
155  	}
156  	if (dev->taskfileset == MVS_ID_NOT_MAPPED)
157  		return;
158  	MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
159  }
160  
161  static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
162  				struct mvs_device *dev)
163  {
164  	if (dev->taskfileset != MVS_ID_NOT_MAPPED)
165  		return 0;
166  	return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
167  }
168  
169  void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
170  {
171  	u32 no;
172  	for_each_phy(phy_mask, phy_mask, no) {
173  		if (!(phy_mask & 1))
174  			continue;
175  		MVS_CHIP_DISP->phy_reset(mvi, no, hard);
176  	}
177  }
178  
179  int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
180  			void *funcdata)
181  {
182  	int rc = 0, phy_id = sas_phy->id;
183  	u32 tmp, i = 0, hi;
184  	struct sas_ha_struct *sha = sas_phy->ha;
185  	struct mvs_info *mvi = NULL;
186  
187  	while (sha->sas_phy[i]) {
188  		if (sha->sas_phy[i] == sas_phy)
189  			break;
190  		i++;
191  	}
192  	hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
193  	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
194  
195  	switch (func) {
196  	case PHY_FUNC_SET_LINK_RATE:
197  		MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
198  		break;
199  
200  	case PHY_FUNC_HARD_RESET:
201  		tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
202  		if (tmp & PHY_RST_HARD)
203  			break;
204  		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET);
205  		break;
206  
207  	case PHY_FUNC_LINK_RESET:
208  		MVS_CHIP_DISP->phy_enable(mvi, phy_id);
209  		MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET);
210  		break;
211  
212  	case PHY_FUNC_DISABLE:
213  		MVS_CHIP_DISP->phy_disable(mvi, phy_id);
214  		break;
215  	case PHY_FUNC_RELEASE_SPINUP_HOLD:
216  	default:
217  		rc = -ENOSYS;
218  	}
219  	msleep(200);
220  	return rc;
221  }
222  
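/*
 * Program a 64-bit SAS address as two 32-bit port-config writes, one to
 * the low-dword register at off_lo and one to the high-dword register at
 * off_hi (the actual offsets are chip-specific and supplied by callers).
 */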
223  void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
224  		      u32 off_hi, u64 sas_addr)
225  {
226  	u32 lo = (u32)sas_addr;
227  	u32 hi = (u32)(sas_addr>>32);
228  
229  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
230  	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
231  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
232  	MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
233  }
234  
235  static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
236  {
237  	struct mvs_phy *phy = &mvi->phy[i];
238  	struct asd_sas_phy *sas_phy = &phy->sas_phy;
239  	struct sas_ha_struct *sas_ha;
240  	if (!phy->phy_attached)
241  		return;
242  
243  	if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
244  		&& phy->phy_type & PORT_TYPE_SAS) {
245  		return;
246  	}
247  
248  	sas_ha = mvi->sas;
249  	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
250  
251  	if (sas_phy->phy) {
252  		struct sas_phy *sphy = sas_phy->phy;
253  
254  		sphy->negotiated_linkrate = sas_phy->linkrate;
255  		sphy->minimum_linkrate = phy->minimum_linkrate;
256  		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
257  		sphy->maximum_linkrate = phy->maximum_linkrate;
258  		sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
259  	}
260  
261  	if (phy->phy_type & PORT_TYPE_SAS) {
262  		struct sas_identify_frame *id;
263  
264  		id = (struct sas_identify_frame *)phy->frame_rcvd;
265  		id->dev_type = phy->identify.device_type;
266  		id->initiator_bits = SAS_PROTOCOL_ALL;
267  		id->target_bits = phy->identify.target_port_protocols;
268  
269  		/* direct attached SAS device */
270  		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
271  			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
272  			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00);
273  		}
274  	} else if (phy->phy_type & PORT_TYPE_SATA) {
275  		/* Nothing */
276  	}
277  	mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
278  
279  	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
280  
281  	mvi->sas->notify_port_event(sas_phy,
282  				   PORTE_BYTES_DMAED);
283  }
284  
285  void mvs_scan_start(struct Scsi_Host *shost)
286  {
287  	int i, j;
288  	unsigned short core_nr;
289  	struct mvs_info *mvi;
290  	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
291  	struct mvs_prv_info *mvs_prv = sha->lldd_ha;
292  
293  	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
294  
295  	for (j = 0; j < core_nr; j++) {
296  		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
297  		for (i = 0; i < mvi->chip->n_phy; ++i)
298  			mvs_bytes_dmaed(mvi, i);
299  	}
300  	mvs_prv->scan_finished = 1;
301  }
302  
303  int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
304  {
305  	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
306  	struct mvs_prv_info *mvs_prv = sha->lldd_ha;
307  
308  	if (mvs_prv->scan_finished == 0)
309  		return 0;
310  
311  	sas_drain_work(sha);
312  	return 1;
313  }
314  
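/*
 * Per-slot DMA buffer layout as arranged below for SMP. The SMP request
 * itself is not copied into the slot; hdr->cmd_tbl points straight at the
 * DMA-mapped request scatterlist.
 *
 *	slot->buf + 0			open address frame (MVS_OAF_SZ)
 *	          + MVS_OAF_SZ		PRD table (prd_size() * n_elem)
 *	          + PRD bytes		status buffer (remainder of the slot)
 */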
315  static int mvs_task_prep_smp(struct mvs_info *mvi,
316  			     struct mvs_task_exec_info *tei)
317  {
318  	int elem, rc, i;
319  	struct sas_ha_struct *sha = mvi->sas;
320  	struct sas_task *task = tei->task;
321  	struct mvs_cmd_hdr *hdr = tei->hdr;
322  	struct domain_device *dev = task->dev;
323  	struct asd_sas_port *sas_port = dev->port;
324  	struct sas_phy *sphy = dev->phy;
325  	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
326  	struct scatterlist *sg_req, *sg_resp;
327  	u32 req_len, resp_len, tag = tei->tag;
328  	void *buf_tmp;
329  	u8 *buf_oaf;
330  	dma_addr_t buf_tmp_dma;
331  	void *buf_prd;
332  	struct mvs_slot_info *slot = &mvi->slot_info[tag];
333  	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
334  
335  	/*
336  	 * DMA-map SMP request, response buffers
337  	 */
338  	sg_req = &task->smp_task.smp_req;
339  	elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
340  	if (!elem)
341  		return -ENOMEM;
342  	req_len = sg_dma_len(sg_req);
343  
344  	sg_resp = &task->smp_task.smp_resp;
345  	elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
346  	if (!elem) {
347  		rc = -ENOMEM;
348  		goto err_out;
349  	}
350  	resp_len = SB_RFB_MAX;
351  
352  	/* must be in dwords */
353  	if ((req_len & 0x3) || (resp_len & 0x3)) {
354  		rc = -EINVAL;
355  		goto err_out_2;
356  	}
357  
358  	/*
359  	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
360  	 */
361  
362  	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
363  	buf_tmp = slot->buf;
364  	buf_tmp_dma = slot->buf_dma;
365  
366  	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
367  
368  	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
369  	buf_oaf = buf_tmp;
370  	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
371  
372  	buf_tmp += MVS_OAF_SZ;
373  	buf_tmp_dma += MVS_OAF_SZ;
374  
375  	/* region 3: PRD table *********************************** */
376  	buf_prd = buf_tmp;
377  	if (tei->n_elem)
378  		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
379  	else
380  		hdr->prd_tbl = 0;
381  
382  	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
383  	buf_tmp += i;
384  	buf_tmp_dma += i;
385  
386  	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
387  	slot->response = buf_tmp;
388  	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
389  	if (mvi->flags & MVF_FLAG_SOC)
390  		hdr->reserved[0] = 0;
391  
392  	/*
393  	 * Fill in TX ring and command slot header
394  	 */
395  	slot->tx = mvi->tx_prod;
396  	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
397  					TXQ_MODE_I | tag |
398  					(MVS_PHY_ID << TXQ_PHY_SHIFT));
399  
400  	hdr->flags |= flags;
401  	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
402  	hdr->tags = cpu_to_le32(tag);
403  	hdr->data_len = 0;
404  
405  	/* generate open address frame hdr (first 12 bytes) */
406  	/* initiator, SMP, ftype 1h */
407  	buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
408  	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
409  	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
410  	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
411  
412  	/* fill in PRD (scatter/gather) table, if any */
413  	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
414  
415  	return 0;
416  
417  err_out_2:
418  	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
419  		     PCI_DMA_FROMDEVICE);
420  err_out:
421  	dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
422  		     PCI_DMA_TODEVICE);
423  	return rc;
424  }
425  
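/*
 * For NCQ the tag in the command header must match the ATA NCQ tag that
 * travels in the FIS. Fetch it from the libata queued command for the
 * FPDMA/NCQ opcodes; the caller folds it into the FIS sector_count field
 * as (tag << 3).
 */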
426  static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
427  {
428  	struct ata_queued_cmd *qc = task->uldd_task;
429  
430  	if (qc) {
431  		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
432  		    qc->tf.command == ATA_CMD_FPDMA_READ ||
433  		    qc->tf.command == ATA_CMD_FPDMA_RECV ||
434  		    qc->tf.command == ATA_CMD_FPDMA_SEND ||
435  		    qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
436  			*tag = qc->tag;
437  			return 1;
438  		}
439  	}
440  
441  	return 0;
442  }
443  
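/*
 * Build an ATA/STP command slot. Unlike SMP, the host-to-device FIS is
 * copied into the command-table region (MVS_ATA_CMD_SZ), with the ATAPI
 * CDB at offset STP_ATAPI_CMD when present. The device must also hold a
 * SATA register set (taskfileset) before the command can be delivered.
 */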
444  static int mvs_task_prep_ata(struct mvs_info *mvi,
445  			     struct mvs_task_exec_info *tei)
446  {
447  	struct sas_task *task = tei->task;
448  	struct domain_device *dev = task->dev;
449  	struct mvs_device *mvi_dev = dev->lldd_dev;
450  	struct mvs_cmd_hdr *hdr = tei->hdr;
451  	struct asd_sas_port *sas_port = dev->port;
452  	struct mvs_slot_info *slot;
453  	void *buf_prd;
454  	u32 tag = tei->tag, hdr_tag;
455  	u32 flags, del_q;
456  	void *buf_tmp;
457  	u8 *buf_cmd, *buf_oaf;
458  	dma_addr_t buf_tmp_dma;
459  	u32 i, req_len, resp_len;
460  	const u32 max_resp_len = SB_RFB_MAX;
461  
462  	if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
463  		mv_dprintk("not enough register sets for dev %d.\n",
464  			mvi_dev->device_id);
465  		return -EBUSY;
466  	}
467  	slot = &mvi->slot_info[tag];
468  	slot->tx = mvi->tx_prod;
469  	del_q = TXQ_MODE_I | tag |
470  		(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
471  		((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
472  		(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
473  	mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
474  
475  	if (task->data_dir == DMA_FROM_DEVICE)
476  		flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
477  	else
478  		flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
479  
480  	if (task->ata_task.use_ncq)
481  		flags |= MCH_FPDMA;
482  	if (dev->sata_dev.class == ATA_DEV_ATAPI) {
483  		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
484  			flags |= MCH_ATAPI;
485  	}
486  
487  	hdr->flags = cpu_to_le32(flags);
488  
489  	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
490  		task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
491  	else
492  		hdr_tag = tag;
493  
494  	hdr->tags = cpu_to_le32(hdr_tag);
495  
496  	hdr->data_len = cpu_to_le32(task->total_xfer_len);
497  
498  	/*
499  	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
500  	 */
501  
502  	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
503  	buf_cmd = buf_tmp = slot->buf;
504  	buf_tmp_dma = slot->buf_dma;
505  
506  	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
507  
508  	buf_tmp += MVS_ATA_CMD_SZ;
509  	buf_tmp_dma += MVS_ATA_CMD_SZ;
510  
511  	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
512  	/* used for STP.  unused for SATA? */
513  	buf_oaf = buf_tmp;
514  	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
515  
516  	buf_tmp += MVS_OAF_SZ;
517  	buf_tmp_dma += MVS_OAF_SZ;
518  
519  	/* region 3: PRD table ********************************************* */
520  	buf_prd = buf_tmp;
521  
522  	if (tei->n_elem)
523  		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
524  	else
525  		hdr->prd_tbl = 0;
526  	i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
527  
528  	buf_tmp += i;
529  	buf_tmp_dma += i;
530  
531  	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
532  	slot->response = buf_tmp;
533  	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
534  	if (mvi->flags & MVF_FLAG_SOC)
535  		hdr->reserved[0] = 0;
536  
537  	req_len = sizeof(struct host_to_dev_fis);
538  	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
539  	    sizeof(struct mvs_err_info) - i;
540  
541  	/* request, response lengths */
542  	resp_len = min(resp_len, max_resp_len);
543  	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
544  
545  	if (likely(!task->ata_task.device_control_reg_update))
546  		task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
547  	/* fill in command FIS and ATAPI CDB */
548  	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
549  	if (dev->sata_dev.class == ATA_DEV_ATAPI)
550  		memcpy(buf_cmd + STP_ATAPI_CMD,
551  			task->ata_task.atapi_packet, 16);
552  
553  	/* generate open address frame hdr (first 12 bytes) */
554  	/* initiator, STP, ftype 1h */
555  	buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
556  	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
557  	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
558  	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
559  
560  	/* fill in PRD (scatter/gather) table, if any */
561  	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
562  
563  	if (task->data_dir == DMA_FROM_DEVICE)
564  		MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask,
565  				TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
566  
567  	return 0;
568  }
569  
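/*
 * Build an SSP command slot. The command table receives an SSP frame
 * header followed by either a COMMAND IU or, for TMFs, a TASK IU; the
 * delivery-queue entry carries the (possibly wide) phy map of the port.
 */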
570  static int mvs_task_prep_ssp(struct mvs_info *mvi,
571  			     struct mvs_task_exec_info *tei, int is_tmf,
572  			     struct mvs_tmf_task *tmf)
573  {
574  	struct sas_task *task = tei->task;
575  	struct mvs_cmd_hdr *hdr = tei->hdr;
576  	struct mvs_port *port = tei->port;
577  	struct domain_device *dev = task->dev;
578  	struct mvs_device *mvi_dev = dev->lldd_dev;
579  	struct asd_sas_port *sas_port = dev->port;
580  	struct mvs_slot_info *slot;
581  	void *buf_prd;
582  	struct ssp_frame_hdr *ssp_hdr;
583  	void *buf_tmp;
584  	u8 *buf_cmd, *buf_oaf, fburst = 0;
585  	dma_addr_t buf_tmp_dma;
586  	u32 flags;
587  	u32 resp_len, req_len, i, tag = tei->tag;
588  	const u32 max_resp_len = SB_RFB_MAX;
589  	u32 phy_mask;
590  
591  	slot = &mvi->slot_info[tag];
592  
593  	phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
594  		sas_port->phy_mask) & TXQ_PHY_MASK;
595  
596  	slot->tx = mvi->tx_prod;
597  	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
598  				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
599  				(phy_mask << TXQ_PHY_SHIFT));
600  
601  	flags = MCH_RETRY;
602  	if (task->ssp_task.enable_first_burst) {
603  		flags |= MCH_FBURST;
604  		fburst = (1 << 7);
605  	}
606  	if (is_tmf)
607  		flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
608  	else
609  		flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
610  
611  	hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
612  	hdr->tags = cpu_to_le32(tag);
613  	hdr->data_len = cpu_to_le32(task->total_xfer_len);
614  
615  	/*
616  	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
617  	 */
618  
619  	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
620  	buf_cmd = buf_tmp = slot->buf;
621  	buf_tmp_dma = slot->buf_dma;
622  
623  	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
624  
625  	buf_tmp += MVS_SSP_CMD_SZ;
626  	buf_tmp_dma += MVS_SSP_CMD_SZ;
627  
628  	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
629  	buf_oaf = buf_tmp;
630  	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
631  
632  	buf_tmp += MVS_OAF_SZ;
633  	buf_tmp_dma += MVS_OAF_SZ;
634  
635  	/* region 3: PRD table ********************************************* */
636  	buf_prd = buf_tmp;
637  	if (tei->n_elem)
638  		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
639  	else
640  		hdr->prd_tbl = 0;
641  
642  	i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
643  	buf_tmp += i;
644  	buf_tmp_dma += i;
645  
646  	/* region 4: status buffer (the larger the PRD, the smaller this buf) */
647  	slot->response = buf_tmp;
648  	hdr->status_buf = cpu_to_le64(buf_tmp_dma);
649  	if (mvi->flags & MVF_FLAG_SOC)
650  		hdr->reserved[0] = 0;
651  
652  	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
653  	    sizeof(struct mvs_err_info) - i;
654  	resp_len = min(resp_len, max_resp_len);
655  
656  	req_len = sizeof(struct ssp_frame_hdr) + 28;
657  
658  	/* request, response lengths */
659  	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
660  
661  	/* generate open address frame hdr (first 12 bytes) */
662  	/* initiator, SSP, ftype 1h */
663  	buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
664  	buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf;
665  	*(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
666  	memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
667  
668  	/* fill in SSP frame header (Command Table.SSP frame header) */
669  	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
670  
671  	if (is_tmf)
672  		ssp_hdr->frame_type = SSP_TASK;
673  	else
674  		ssp_hdr->frame_type = SSP_COMMAND;
675  
676  	memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
677  	       HASHED_SAS_ADDR_SIZE);
678  	memcpy(ssp_hdr->hashed_src_addr,
679  	       dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
680  	ssp_hdr->tag = cpu_to_be16(tag);
681  
682  	/* fill in IU for TASK and Command Frame */
683  	buf_cmd += sizeof(*ssp_hdr);
684  	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
685  
686  	if (ssp_hdr->frame_type != SSP_TASK) {
687  		buf_cmd[9] = fburst | task->ssp_task.task_attr |
688  				(task->ssp_task.task_prio << 3);
689  		memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd,
690  		       task->ssp_task.cmd->cmd_len);
691  	} else {
692  		buf_cmd[10] = tmf->tmf;
693  		switch (tmf->tmf) {
694  		case TMF_ABORT_TASK:
695  		case TMF_QUERY_TASK:
696  			buf_cmd[12] =
697  				(tmf->tag_of_task_to_be_managed >> 8) & 0xff;
698  			buf_cmd[13] =
699  				tmf->tag_of_task_to_be_managed & 0xff;
700  			break;
701  		default:
702  			break;
703  		}
704  	}
705  	/* fill in PRD (scatter/gather) table, if any */
706  	MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
707  	return 0;
708  }
709  
710  #define	DEV_IS_GONE(mvi_dev)	((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
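/*
 * Common preparation for all protocols: validate the port and device,
 * DMA-map the scatterlist for non-ATA protocols (ATA scatterlists arrive
 * already mapped), allocate a tag and a slot buffer from the dma_pool,
 * then hand off to the per-protocol prep routine above. On success the
 * TX producer index advances and *pass tells the caller there is work
 * to deliver.
 */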
711  static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf,
712  				struct mvs_tmf_task *tmf, int *pass)
713  {
714  	struct domain_device *dev = task->dev;
715  	struct mvs_device *mvi_dev = dev->lldd_dev;
716  	struct mvs_task_exec_info tei;
717  	struct mvs_slot_info *slot;
718  	u32 tag = 0xdeadbeef, n_elem = 0;
719  	int rc = 0;
720  
721  	if (!dev->port) {
722  		struct task_status_struct *tsm = &task->task_status;
723  
724  		tsm->resp = SAS_TASK_UNDELIVERED;
725  		tsm->stat = SAS_PHY_DOWN;
726  		/*
727  		 * libsas will use dev->port, should
728  		 * not call task_done for sata
729  		 */
730  		if (dev->dev_type != SAS_SATA_DEV)
731  			task->task_done(task);
732  		return rc;
733  	}
734  
735  	if (DEV_IS_GONE(mvi_dev)) {
736  		if (mvi_dev)
737  			mv_dprintk("device %d not ready.\n",
738  				mvi_dev->device_id);
739  		else
740  			mv_dprintk("device %016llx not ready.\n",
741  				SAS_ADDR(dev->sas_addr));
742  
743  		rc = SAS_PHY_DOWN;
744  		return rc;
745  	}
746  	tei.port = dev->port->lldd_port;
747  	if (tei.port && !tei.port->port_attached && !tmf) {
748  		if (sas_protocol_ata(task->task_proto)) {
749  			struct task_status_struct *ts = &task->task_status;
750  			mv_dprintk("SATA/STP port %d does not attach "
751  					"device.\n", dev->port->id);
752  			ts->resp = SAS_TASK_COMPLETE;
753  			ts->stat = SAS_PHY_DOWN;
754  
755  			task->task_done(task);
756  
757  		} else {
758  			struct task_status_struct *ts = &task->task_status;
759  			mv_dprintk("SAS port %d does not attach "
760  				"device.\n", dev->port->id);
761  			ts->resp = SAS_TASK_UNDELIVERED;
762  			ts->stat = SAS_PHY_DOWN;
763  			task->task_done(task);
764  		}
765  		return rc;
766  	}
767  
768  	if (!sas_protocol_ata(task->task_proto)) {
769  		if (task->num_scatter) {
770  			n_elem = dma_map_sg(mvi->dev,
771  					    task->scatter,
772  					    task->num_scatter,
773  					    task->data_dir);
774  			if (!n_elem) {
775  				rc = -ENOMEM;
776  				goto prep_out;
777  			}
778  		}
779  	} else {
780  		n_elem = task->num_scatter;
781  	}
782  
783  	rc = mvs_tag_alloc(mvi, &tag);
784  	if (rc)
785  		goto err_out;
786  
787  	slot = &mvi->slot_info[tag];
788  
789  	task->lldd_task = NULL;
790  	slot->n_elem = n_elem;
791  	slot->slot_tag = tag;
792  
793  	slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
794  	if (!slot->buf) {
795  		rc = -ENOMEM;
796  		goto err_out_tag;
797  	}
798  	memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
799  
800  	tei.task = task;
801  	tei.hdr = &mvi->slot[tag];
802  	tei.tag = tag;
803  	tei.n_elem = n_elem;
804  	switch (task->task_proto) {
805  	case SAS_PROTOCOL_SMP:
806  		rc = mvs_task_prep_smp(mvi, &tei);
807  		break;
808  	case SAS_PROTOCOL_SSP:
809  		rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
810  		break;
811  	case SAS_PROTOCOL_SATA:
812  	case SAS_PROTOCOL_STP:
813  	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
814  		rc = mvs_task_prep_ata(mvi, &tei);
815  		break;
816  	default:
817  		dev_printk(KERN_ERR, mvi->dev,
818  			"unknown sas_task proto: 0x%x\n",
819  			task->task_proto);
820  		rc = -EINVAL;
821  		break;
822  	}
823  
824  	if (rc) {
825  		mv_dprintk("rc is %x\n", rc);
826  		goto err_out_slot_buf;
827  	}
828  	slot->task = task;
829  	slot->port = tei.port;
830  	task->lldd_task = slot;
831  	list_add_tail(&slot->entry, &tei.port->list);
832  	spin_lock(&task->task_state_lock);
833  	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
834  	spin_unlock(&task->task_state_lock);
835  
836  	mvi_dev->running_req++;
837  	++(*pass);
838  	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
839  
840  	return rc;
841  
842  err_out_slot_buf:
843  	dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
844  err_out_tag:
845  	mvs_tag_free(mvi, tag);
846  err_out:
847  
848  	dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc);
849  	if (!sas_protocol_ata(task->task_proto))
850  		if (n_elem)
851  			dma_unmap_sg(mvi->dev, task->scatter, n_elem,
852  				     task->data_dir);
853  prep_out:
854  	return rc;
855  }
856  
857  static int mvs_task_exec(struct sas_task *task, gfp_t gfp_flags,
858  				struct completion *completion, int is_tmf,
859  				struct mvs_tmf_task *tmf)
860  {
861  	struct mvs_info *mvi = NULL;
862  	u32 rc = 0;
863  	u32 pass = 0;
864  	unsigned long flags = 0;
865  
866  	mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info;
867  
868  	spin_lock_irqsave(&mvi->lock, flags);
869  	rc = mvs_task_prep(task, mvi, is_tmf, tmf, &pass);
870  	if (rc)
871  		dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
872  
873  	if (likely(pass))
874  		MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) &
875  				(MVS_CHIP_SLOT_SZ - 1));
876  	spin_unlock_irqrestore(&mvi->lock, flags);
877  
878  	return rc;
879  }
880  
881  int mvs_queue_command(struct sas_task *task, gfp_t gfp_flags)
882  {
883  	return mvs_task_exec(task, gfp_flags, NULL, 0, NULL);
884  }
885  
886  static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
887  {
888  	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
889  	mvs_tag_clear(mvi, slot_idx);
890  }
891  
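/*
 * Undo everything mvs_task_prep() set up for one slot: release the
 * scatterlist mappings, return the slot buffer to the dma_pool, unlink
 * the slot from its port list and clear the tag.
 */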
892  static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
893  			  struct mvs_slot_info *slot, u32 slot_idx)
894  {
895  	if (!slot)
896  		return;
897  	if (!slot->task)
898  		return;
899  	if (!sas_protocol_ata(task->task_proto))
900  		if (slot->n_elem)
901  			dma_unmap_sg(mvi->dev, task->scatter,
902  				     slot->n_elem, task->data_dir);
903  
904  	switch (task->task_proto) {
905  	case SAS_PROTOCOL_SMP:
906  		dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
907  			     PCI_DMA_FROMDEVICE);
908  		dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
909  			     PCI_DMA_TODEVICE);
910  		break;
911  
912  	case SAS_PROTOCOL_SATA:
913  	case SAS_PROTOCOL_STP:
914  	case SAS_PROTOCOL_SSP:
915  	default:
916  		/* do nothing */
917  		break;
918  	}
919  
920  	if (slot->buf) {
921  		dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma);
922  		slot->buf = NULL;
923  	}
924  	list_del_init(&slot->entry);
925  	task->lldd_task = NULL;
926  	slot->task = NULL;
927  	slot->port = NULL;
928  	slot->slot_tag = 0xFFFFFFFF;
929  	mvs_slot_free(mvi, slot_idx);
930  }
931  
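/*
 * Reprogram the wide-port membership: each phy that belongs to the wide
 * port gets the full phy map written to PHYR_WIDE_PORT, every other phy
 * of the port gets zero.
 */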
932  static void mvs_update_wideport(struct mvs_info *mvi, int phy_no)
933  {
934  	struct mvs_phy *phy = &mvi->phy[phy_no];
935  	struct mvs_port *port = phy->port;
936  	int j, no;
937  
938  	for_each_phy(port->wide_port_phymap, j, no) {
939  		if (j & 1) {
940  			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
941  						PHYR_WIDE_PORT);
942  			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
943  						port->wide_port_phymap);
944  		} else {
945  			MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
946  						PHYR_WIDE_PORT);
947  			MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
948  						0);
949  		}
950  	}
951  }
952  
953  static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
954  {
955  	u32 tmp;
956  	struct mvs_phy *phy = &mvi->phy[i];
957  	struct mvs_port *port = phy->port;
958  
959  	tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
960  	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
961  		if (!port)
962  			phy->phy_attached = 1;
963  		return tmp;
964  	}
965  
966  	if (port) {
967  		if (phy->phy_type & PORT_TYPE_SAS) {
968  			port->wide_port_phymap &= ~(1U << i);
969  			if (!port->wide_port_phymap)
970  				port->port_attached = 0;
971  			mvs_update_wideport(mvi, i);
972  		} else if (phy->phy_type & PORT_TYPE_SATA)
973  			port->port_attached = 0;
974  		phy->port = NULL;
975  		phy->phy_attached = 0;
976  		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
977  	}
978  	return 0;
979  }
980  
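/*
 * Read back the four signature dwords of the received D2H FIS through
 * the port configuration window, highest dword first. The final test
 * looks like a fix-up of the ATAPI signature (LBA mid/high 14h/EBh)
 * reported by some devices.
 */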
981  static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
982  {
983  	u32 *s = (u32 *) buf;
984  
985  	if (!s)
986  		return NULL;
987  
988  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
989  	s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
990  
991  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
992  	s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
993  
994  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
995  	s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
996  
997  	MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
998  	s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i));
999  
1000  	if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
1001  		s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
1002  
1003  	return s;
1004  }
1005  
1006  static u32 mvs_is_sig_fis_received(u32 irq_status)
1007  {
1008  	return irq_status & PHYEV_SIG_FIS;
1009  }
1010  
1011  static void mvs_sig_remove_timer(struct mvs_phy *phy)
1012  {
1013  	if (phy->timer.function)
1014  		del_timer(&phy->timer);
1015  	phy->timer.function = NULL;
1016  }
1017  
1018  void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
1019  {
1020  	struct mvs_phy *phy = &mvi->phy[i];
1021  	struct sas_identify_frame *id;
1022  
1023  	id = (struct sas_identify_frame *)phy->frame_rcvd;
1024  
1025  	if (get_st) {
1026  		phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
1027  		phy->phy_status = mvs_is_phy_ready(mvi, i);
1028  	}
1029  
1030  	if (phy->phy_status) {
1031  		int oob_done = 0;
1032  		struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
1033  
1034  		oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
1035  
1036  		MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
1037  		if (phy->phy_type & PORT_TYPE_SATA) {
1038  			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
1039  			if (mvs_is_sig_fis_received(phy->irq_status)) {
1040  				mvs_sig_remove_timer(phy);
1041  				phy->phy_attached = 1;
1042  				phy->att_dev_sas_addr =
1043  					i + mvi->id * mvi->chip->n_phy;
1044  				if (oob_done)
1045  					sas_phy->oob_mode = SATA_OOB_MODE;
1046  				phy->frame_rcvd_size =
1047  				    sizeof(struct dev_to_host_fis);
1048  				mvs_get_d2h_reg(mvi, i, id);
1049  			} else {
1050  				u32 tmp;
1051  				dev_printk(KERN_DEBUG, mvi->dev,
1052  					"Phy%d : No sig fis\n", i);
1053  				tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
1054  				MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
1055  						tmp | PHYEV_SIG_FIS);
1056  				phy->phy_attached = 0;
1057  				phy->phy_type &= ~PORT_TYPE_SATA;
1058  				goto out_done;
1059  			}
1060  		} else if (phy->phy_type & PORT_TYPE_SAS
1061  			|| phy->att_dev_info & PORT_SSP_INIT_MASK) {
1062  			phy->phy_attached = 1;
1063  			phy->identify.device_type =
1064  				phy->att_dev_info & PORT_DEV_TYPE_MASK;
1065  
1066  			if (phy->identify.device_type == SAS_END_DEVICE)
1067  				phy->identify.target_port_protocols =
1068  							SAS_PROTOCOL_SSP;
1069  			else if (phy->identify.device_type != SAS_PHY_UNUSED)
1070  				phy->identify.target_port_protocols =
1071  							SAS_PROTOCOL_SMP;
1072  			if (oob_done)
1073  				sas_phy->oob_mode = SAS_OOB_MODE;
1074  			phy->frame_rcvd_size =
1075  			    sizeof(struct sas_identify_frame);
1076  		}
1077  		memcpy(sas_phy->attached_sas_addr,
1078  			&phy->att_dev_sas_addr, SAS_ADDR_SIZE);
1079  
1080  		if (MVS_CHIP_DISP->phy_work_around)
1081  			MVS_CHIP_DISP->phy_work_around(mvi, i);
1082  	}
1083  	mv_dprintk("phy %d attach dev info is %x\n",
1084  		i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
1085  	mv_dprintk("phy %d attach sas addr is %llx\n",
1086  		i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
1087  out_done:
1088  	if (get_st)
1089  		MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
1090  }
1091  
1092  static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
1093  {
1094  	struct sas_ha_struct *sas_ha = sas_phy->ha;
1095  	struct mvs_info *mvi = NULL;
	int i = 0, hi;
1096  	struct mvs_phy *phy = sas_phy->lldd_phy;
1097  	struct asd_sas_port *sas_port = sas_phy->port;
1098  	struct mvs_port *port;
1099  	unsigned long flags = 0;
1100  	if (!sas_port)
1101  		return;
1102  
1103  	while (sas_ha->sas_phy[i]) {
1104  		if (sas_ha->sas_phy[i] == sas_phy)
1105  			break;
1106  		i++;
1107  	}
1108  	hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
1109  	mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
1110  	if (i >= mvi->chip->n_phy)
1111  		port = &mvi->port[i - mvi->chip->n_phy];
1112  	else
1113  		port = &mvi->port[i];
1114  	if (lock)
1115  		spin_lock_irqsave(&mvi->lock, flags);
1116  	port->port_attached = 1;
1117  	phy->port = port;
1118  	sas_port->lldd_port = port;
1119  	if (phy->phy_type & PORT_TYPE_SAS) {
1120  		port->wide_port_phymap = sas_port->phy_mask;
1121  		mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
1122  		mvs_update_wideport(mvi, sas_phy->id);
1123  
1124  		/* direct attached SAS device */
1125  		if (phy->att_dev_info & PORT_SSP_TRGT_MASK) {
1126  			MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
1127  			MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04);
1128  		}
1129  	}
1130  	if (lock)
1131  		spin_unlock_irqrestore(&mvi->lock, flags);
1132  }
1133  
1134  static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
1135  {
1136  	struct domain_device *dev;
1137  	struct mvs_phy *phy = sas_phy->lldd_phy;
1138  	struct mvs_info *mvi = phy->mvi;
1139  	struct asd_sas_port *port = sas_phy->port;
1140  	int phy_no = 0;
1141  
1142  	while (phy != &mvi->phy[phy_no]) {
1143  		phy_no++;
1144  		if (phy_no >= MVS_MAX_PHYS)
1145  			return;
1146  	}
1147  	list_for_each_entry(dev, &port->dev_list, dev_list_node)
1148  		mvs_do_release_task(phy->mvi, phy_no, dev);
1149  
1150  }
1151  
1152  
1153  void mvs_port_formed(struct asd_sas_phy *sas_phy)
1154  {
1155  	mvs_port_notify_formed(sas_phy, 1);
1156  }
1157  
1158  void mvs_port_deformed(struct asd_sas_phy *sas_phy)
1159  {
1160  	mvs_port_notify_deformed(sas_phy, 1);
1161  }
1162  
1163  static struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
1164  {
1165  	u32 dev;
1166  	for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
1167  		if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) {
1168  			mvi->devices[dev].device_id = dev;
1169  			return &mvi->devices[dev];
1170  		}
1171  	}
1172  
1173  	if (dev == MVS_MAX_DEVICES)
1174  		mv_printk("supports a maximum of %d devices, ignoring the rest.\n",
1175  			MVS_MAX_DEVICES);
1176  
1177  	return NULL;
1178  }
1179  
1180  static void mvs_free_dev(struct mvs_device *mvi_dev)
1181  {
1182  	u32 id = mvi_dev->device_id;
1183  	memset(mvi_dev, 0, sizeof(*mvi_dev));
1184  	mvi_dev->device_id = id;
1185  	mvi_dev->dev_type = SAS_PHY_UNUSED;
1186  	mvi_dev->dev_status = MVS_DEV_NORMAL;
1187  	mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
1188  }
1189  
1190  static int mvs_dev_found_notify(struct domain_device *dev, int lock)
1191  {
1192  	unsigned long flags = 0;
1193  	int res = 0;
1194  	struct mvs_info *mvi = NULL;
1195  	struct domain_device *parent_dev = dev->parent;
1196  	struct mvs_device *mvi_device;
1197  
1198  	mvi = mvs_find_dev_mvi(dev);
1199  
1200  	if (lock)
1201  		spin_lock_irqsave(&mvi->lock, flags);
1202  
1203  	mvi_device = mvs_alloc_dev(mvi);
1204  	if (!mvi_device) {
1205  		res = -1;
1206  		goto found_out;
1207  	}
1208  	dev->lldd_dev = mvi_device;
1209  	mvi_device->dev_status = MVS_DEV_NORMAL;
1210  	mvi_device->dev_type = dev->dev_type;
1211  	mvi_device->mvi_info = mvi;
1212  	mvi_device->sas_device = dev;
1213  	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
1214  		int phy_id;
1215  		u8 phy_num = parent_dev->ex_dev.num_phys;
1216  		struct ex_phy *phy;
1217  		for (phy_id = 0; phy_id < phy_num; phy_id++) {
1218  			phy = &parent_dev->ex_dev.ex_phy[phy_id];
1219  			if (SAS_ADDR(phy->attached_sas_addr) ==
1220  				SAS_ADDR(dev->sas_addr)) {
1221  				mvi_device->attached_phy = phy_id;
1222  				break;
1223  			}
1224  		}
1225  
1226  		if (phy_id == phy_num) {
1227  			mv_printk("Error: no attached dev:%016llx "
1228  				"at ex:%016llx.\n",
1229  				SAS_ADDR(dev->sas_addr),
1230  				SAS_ADDR(parent_dev->sas_addr));
1231  			res = -1;
1232  		}
1233  	}
1234  
1235  found_out:
1236  	if (lock)
1237  		spin_unlock_irqrestore(&mvi->lock, flags);
1238  	return res;
1239  }
1240  
1241  int mvs_dev_found(struct domain_device *dev)
1242  {
1243  	return mvs_dev_found_notify(dev, 1);
1244  }
1245  
1246  static void mvs_dev_gone_notify(struct domain_device *dev)
1247  {
1248  	unsigned long flags = 0;
1249  	struct mvs_device *mvi_dev = dev->lldd_dev;
1250  	struct mvs_info *mvi;
1251  
1252  	if (!mvi_dev) {
1253  		mv_dprintk("device has already gone.\n");
1254  		return;
1255  	}
1256  
1257  	mvi = mvi_dev->mvi_info;
1258  
1259  	spin_lock_irqsave(&mvi->lock, flags);
1260  
1261  	mv_dprintk("found dev[%d:%x] is gone.\n",
1262  		mvi_dev->device_id, mvi_dev->dev_type);
1263  	mvs_release_task(mvi, dev);
1264  	mvs_free_reg_set(mvi, mvi_dev);
1265  	mvs_free_dev(mvi_dev);
1266  
1267  	dev->lldd_dev = NULL;
1268  	mvi_dev->sas_device = NULL;
1269  
1270  	spin_unlock_irqrestore(&mvi->lock, flags);
1271  }
1272  
1273  
1274  void mvs_dev_gone(struct domain_device *dev)
1275  {
1276  	mvs_dev_gone_notify(dev);
1277  }
1278  
1279  static void mvs_task_done(struct sas_task *task)
1280  {
1281  	if (!del_timer(&task->slow_task->timer))
1282  		return;
1283  	complete(&task->slow_task->completion);
1284  }
1285  
1286  static void mvs_tmf_timedout(struct timer_list *t)
1287  {
1288  	struct sas_task_slow *slow = from_timer(slow, t, timer);
1289  	struct sas_task *task = slow->task;
1290  
1291  	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1292  	complete(&task->slow_task->completion);
1293  }
1294  
1295  #define MVS_TASK_TIMEOUT 20
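/*
 * Issue an internal task management function as a libsas slow task: up
 * to three attempts, each bounded by an MVS_TASK_TIMEOUT-second timer.
 * mvs_tmf_timedout() marks the task aborted and completes it; the normal
 * path, mvs_task_done(), completes only if it wins the race to delete
 * that timer, so each attempt is completed exactly once.
 */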
1296  static int mvs_exec_internal_tmf_task(struct domain_device *dev,
1297  			void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
1298  {
1299  	int res, retry;
1300  	struct sas_task *task = NULL;
1301  
1302  	for (retry = 0; retry < 3; retry++) {
1303  		task = sas_alloc_slow_task(GFP_KERNEL);
1304  		if (!task)
1305  			return -ENOMEM;
1306  
1307  		task->dev = dev;
1308  		task->task_proto = dev->tproto;
1309  
1310  		memcpy(&task->ssp_task, parameter, para_len);
1311  		task->task_done = mvs_task_done;
1312  
1313  		task->slow_task->timer.function = mvs_tmf_timedout;
1314  		task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
1315  		add_timer(&task->slow_task->timer);
1316  
1317  		res = mvs_task_exec(task, GFP_KERNEL, NULL, 1, tmf);
1318  
1319  		if (res) {
1320  			del_timer(&task->slow_task->timer);
1321  			mv_printk("executing internal task failed:%d\n", res);
1322  			goto ex_err;
1323  		}
1324  
1325  		wait_for_completion(&task->slow_task->completion);
1326  		res = TMF_RESP_FUNC_FAILED;
1327  		/* Even if the TMF timed out, return directly. */
1328  		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
1329  			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
1330  				mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
1331  				goto ex_err;
1332  			}
1333  		}
1334  
1335  		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1336  		    task->task_status.stat == SAM_STAT_GOOD) {
1337  			res = TMF_RESP_FUNC_COMPLETE;
1338  			break;
1339  		}
1340  
1341  		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1342  		      task->task_status.stat == SAS_DATA_UNDERRUN) {
1343  			/* no error, but return the number of bytes of
1344  			 * underrun */
1345  			res = task->task_status.residual;
1346  			break;
1347  		}
1348  
1349  		if (task->task_status.resp == SAS_TASK_COMPLETE &&
1350  		      task->task_status.stat == SAS_DATA_OVERRUN) {
1351  			mv_dprintk("blocked task error.\n");
1352  			res = -EMSGSIZE;
1353  			break;
1354  		} else {
1355  			mv_dprintk("task to dev %016llx response: 0x%x "
1356  				    "status 0x%x\n",
1357  				    SAS_ADDR(dev->sas_addr),
1358  				    task->task_status.resp,
1359  				    task->task_status.stat);
1360  			sas_free_task(task);
1361  			task = NULL;
1362  
1363  		}
1364  	}
1365  ex_err:
1366  	BUG_ON(retry == 3 && task != NULL);
1367  	sas_free_task(task);
1368  	return res;
1369  }
1370  
1371  static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
1372  				u8 *lun, struct mvs_tmf_task *tmf)
1373  {
1374  	struct sas_ssp_task ssp_task;
1375  	if (!(dev->tproto & SAS_PROTOCOL_SSP))
1376  		return TMF_RESP_FUNC_ESUPP;
1377  
1378  	memcpy(ssp_task.LUN, lun, 8);
1379  
1380  	return mvs_exec_internal_tmf_task(dev, &ssp_task,
1381  				sizeof(ssp_task), tmf);
1382  }
1383  
1384  
1385  /* The standard mandates link reset for ATA (type 0) and
1386   * hard reset for SSP (type 1), only for RECOVERY */
1387  static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
1388  {
1389  	int rc;
1390  	struct sas_phy *phy = sas_get_local_phy(dev);
1391  	int reset_type = (dev->dev_type == SAS_SATA_DEV ||
1392  			(dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1393  	rc = sas_phy_reset(phy, reset_type);
1394  	sas_put_local_phy(phy);
1395  	msleep(2000);
1396  	return rc;
1397  }
1398  
1399  /* mandatory SAM-3 */
1400  int mvs_lu_reset(struct domain_device *dev, u8 *lun)
1401  {
1402  	unsigned long flags;
1403  	int rc = TMF_RESP_FUNC_FAILED;
1404  	struct mvs_tmf_task tmf_task;
1405  	struct mvs_device *mvi_dev = dev->lldd_dev;
1406  	struct mvs_info *mvi = mvi_dev->mvi_info;
1407  
1408  	tmf_task.tmf = TMF_LU_RESET;
1409  	mvi_dev->dev_status = MVS_DEV_EH;
1410  	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1411  	if (rc == TMF_RESP_FUNC_COMPLETE) {
1412  		spin_lock_irqsave(&mvi->lock, flags);
1413  		mvs_release_task(mvi, dev);
1414  		spin_unlock_irqrestore(&mvi->lock, flags);
1415  	}
1416  	/* If failed, fall through to I_T nexus reset */
1417  	mv_printk("%s for device[%x]:rc= %d\n", __func__,
1418  			mvi_dev->device_id, rc);
1419  	return rc;
1420  }
1421  
1422  int mvs_I_T_nexus_reset(struct domain_device *dev)
1423  {
1424  	unsigned long flags;
1425  	int rc = TMF_RESP_FUNC_FAILED;
1426  	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1427  	struct mvs_info *mvi = mvi_dev->mvi_info;
1428  
1429  	if (mvi_dev->dev_status != MVS_DEV_EH)
1430  		return TMF_RESP_FUNC_COMPLETE;
1431  	else
1432  		mvi_dev->dev_status = MVS_DEV_NORMAL;
1433  	rc = mvs_debug_I_T_nexus_reset(dev);
1434  	mv_printk("%s for device[%x]:rc= %d\n",
1435  		__func__, mvi_dev->device_id, rc);
1436  
1437  	spin_lock_irqsave(&mvi->lock, flags);
1438  	mvs_release_task(mvi, dev);
1439  	spin_unlock_irqrestore(&mvi->lock, flags);
1440  
1441  	return rc;
1442  }
1443  /* optional SAM-3 */
1444  int mvs_query_task(struct sas_task *task)
1445  {
1446  	u32 tag;
1447  	struct scsi_lun lun;
1448  	struct mvs_tmf_task tmf_task;
1449  	int rc = TMF_RESP_FUNC_FAILED;
1450  
1451  	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1452  		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
1453  		struct domain_device *dev = task->dev;
1454  		struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1455  		struct mvs_info *mvi = mvi_dev->mvi_info;
1456  
1457  		int_to_scsilun(cmnd->device->lun, &lun);
1458  		rc = mvs_find_tag(mvi, task, &tag);
1459  		if (rc == 0) {
1460  			rc = TMF_RESP_FUNC_FAILED;
1461  			return rc;
1462  		}
1463  
1464  		tmf_task.tmf = TMF_QUERY_TASK;
1465  		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1466  
1467  		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1468  		switch (rc) {
1469  		/* The task is still in Lun, release it then */
1470  		case TMF_RESP_FUNC_SUCC:
1471  		/* The task is not in Lun or failed, reset the phy */
1472  		case TMF_RESP_FUNC_FAILED:
1473  		case TMF_RESP_FUNC_COMPLETE:
1474  			break;
1475  		}
1476  	}
1477  	mv_printk("%s:rc= %d\n", __func__, rc);
1478  	return rc;
1479  }
1480  
1481  /*  mandatory SAM-3, still need free task/slot info */
1482  int mvs_abort_task(struct sas_task *task)
1483  {
1484  	struct scsi_lun lun;
1485  	struct mvs_tmf_task tmf_task;
1486  	struct domain_device *dev = task->dev;
1487  	struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
1488  	struct mvs_info *mvi;
1489  	int rc = TMF_RESP_FUNC_FAILED;
1490  	unsigned long flags;
1491  	u32 tag;
1492  
1493  	if (!mvi_dev) {
1494  		mv_printk("Device has been removed\n");
1495  		return TMF_RESP_FUNC_FAILED;
1496  	}
1497  
1498  	mvi = mvi_dev->mvi_info;
1499  
1500  	spin_lock_irqsave(&task->task_state_lock, flags);
1501  	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1502  		spin_unlock_irqrestore(&task->task_state_lock, flags);
1503  		rc = TMF_RESP_FUNC_COMPLETE;
1504  		goto out;
1505  	}
1506  	spin_unlock_irqrestore(&task->task_state_lock, flags);
1507  	mvi_dev->dev_status = MVS_DEV_EH;
1508  	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
1509  		struct scsi_cmnd *cmnd = (struct scsi_cmnd *)task->uldd_task;
1510  
1511  		int_to_scsilun(cmnd->device->lun, &lun);
1512  		rc = mvs_find_tag(mvi, task, &tag);
1513  		if (rc == 0) {
1514  			mv_printk("No such tag in %s\n", __func__);
1515  			rc = TMF_RESP_FUNC_FAILED;
1516  			return rc;
1517  		}
1518  
1519  		tmf_task.tmf = TMF_ABORT_TASK;
1520  		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
1521  
1522  		rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1523  
1524  		/* if successful, clear the task and callback forwards.*/
1525  		if (rc == TMF_RESP_FUNC_COMPLETE) {
1526  			u32 slot_no;
1527  			struct mvs_slot_info *slot;
1528  
1529  			if (task->lldd_task) {
1530  				slot = task->lldd_task;
1531  				slot_no = (u32) (slot - mvi->slot_info);
1532  				spin_lock_irqsave(&mvi->lock, flags);
1533  				mvs_slot_complete(mvi, slot_no, 1);
1534  				spin_unlock_irqrestore(&mvi->lock, flags);
1535  			}
1536  		}
1537  
1538  	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
1539  		task->task_proto & SAS_PROTOCOL_STP) {
1540  		if (SAS_SATA_DEV == dev->dev_type) {
1541  			struct mvs_slot_info *slot = task->lldd_task;
1542  			u32 slot_idx = (u32)(slot - mvi->slot_info);
1543  			mv_dprintk("mvs_abort_task() mvi=%p task=%p "
1544  				   "slot=%p slot_idx=x%x\n",
1545  				   mvi, task, slot, slot_idx);
1546  			task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1547  			mvs_slot_task_free(mvi, task, slot, slot_idx);
1548  			rc = TMF_RESP_FUNC_COMPLETE;
1549  			goto out;
1550  		}
1551  
1552  	}
1553  out:
1554  	if (rc != TMF_RESP_FUNC_COMPLETE)
1555  		mv_printk("%s:rc= %d\n", __func__, rc);
1556  	return rc;
1557  }
1558  
1559  int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
1560  {
1561  	int rc = TMF_RESP_FUNC_FAILED;
1562  	struct mvs_tmf_task tmf_task;
1563  
1564  	tmf_task.tmf = TMF_ABORT_TASK_SET;
1565  	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1566  
1567  	return rc;
1568  }
1569  
1570  int mvs_clear_aca(struct domain_device *dev, u8 *lun)
1571  {
1572  	int rc = TMF_RESP_FUNC_FAILED;
1573  	struct mvs_tmf_task tmf_task;
1574  
1575  	tmf_task.tmf = TMF_CLEAR_ACA;
1576  	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1577  
1578  	return rc;
1579  }
1580  
1581  int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
1582  {
1583  	int rc = TMF_RESP_FUNC_FAILED;
1584  	struct mvs_tmf_task tmf_task;
1585  
1586  	tmf_task.tmf = TMF_CLEAR_TASK_SET;
1587  	rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
1588  
1589  	return rc;
1590  }
1591  
1592  static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
1593  			u32 slot_idx, int err)
1594  {
1595  	struct mvs_device *mvi_dev = task->dev->lldd_dev;
1596  	struct task_status_struct *tstat = &task->task_status;
1597  	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
1598  	int stat = SAM_STAT_GOOD;
1599  
1600  
1601  	resp->frame_len = sizeof(struct dev_to_host_fis);
1602  	memcpy(&resp->ending_fis[0],
1603  	       SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
1604  	       sizeof(struct dev_to_host_fis));
1605  	tstat->buf_valid_size = sizeof(*resp);
1606  	if (unlikely(err)) {
1607  		if (unlikely(err & CMD_ISS_STPD))
1608  			stat = SAS_OPEN_REJECT;
1609  		else
1610  			stat = SAS_PROTO_RESPONSE;
1611  	}
1612  
1613  	return stat;
1614  }
1615  
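/*
 * Build sense data in either descriptor format (response code 0x72,
 * key/ASC/ASCQ in bytes 1-3) or fixed format (response code 0x70, key
 * in byte 2, ASC/ASCQ in bytes 12-13), as defined by SPC.
 */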
1616  static void mvs_set_sense(u8 *buffer, int len, int d_sense,
1617  		int key, int asc, int ascq)
1618  {
1619  	memset(buffer, 0, len);
1620  
1621  	if (d_sense) {
1622  		/* Descriptor format */
1623  		if (len < 4) {
1624  			mv_printk("Length %d of sense buffer too small to "
1625  				"fit sense %x:%x:%x", len, key, asc, ascq);
1626  		}
1627  
1628  		buffer[0] = 0x72;		/* Response Code	*/
1629  		if (len > 1)
1630  			buffer[1] = key;	/* Sense Key */
1631  		if (len > 2)
1632  			buffer[2] = asc;	/* ASC	*/
1633  		if (len > 3)
1634  			buffer[3] = ascq;	/* ASCQ	*/
1635  	} else {
1636  		if (len < 14) {
1637  			mv_printk("Length %d of sense buffer too small to "
1638  				"fit sense %x:%x:%x", len, key, asc, ascq);
1639  		}
1640  
1641  		buffer[0] = 0x70;		/* Response Code	*/
1642  		if (len > 2)
1643  			buffer[2] = key;	/* Sense Key */
1644  		if (len > 7)
1645  			buffer[7] = 0x0a;	/* Additional Sense Length */
1646  		if (len > 12)
1647  			buffer[12] = asc;	/* ASC */
1648  		if (len > 13)
1649  			buffer[13] = ascq; /* ASCQ */
1650  	}
1651  
1652  	return;
1653  }
1654  
1655  static void mvs_fill_ssp_resp_iu(struct ssp_response_iu *iu,
1656  				u8 key, u8 asc, u8 asc_q)
1657  {
1658  	iu->datapres = 2;
1659  	iu->response_data_len = 0;
1660  	iu->sense_data_len = 17;
1661  	iu->status = 0x02;
1662  	mvs_set_sense(iu->sense_data, 17, 0,
1663  			key, asc, asc_q);
1664  }
1665  
1666  static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
1667  			 u32 slot_idx)
1668  {
1669  	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
1670  	int stat;
1671  	u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response);
1672  	u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1));
1673  	u32 tfs = 0;
1674  	enum mvs_port_type type = PORT_TYPE_SAS;
1675  
1676  	if (err_dw0 & CMD_ISS_STPD)
1677  		MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
1678  
1679  	MVS_CHIP_DISP->command_active(mvi, slot_idx);
1680  
1681  	stat = SAM_STAT_CHECK_CONDITION;
1682  	switch (task->task_proto) {
1683  	case SAS_PROTOCOL_SSP:
1684  	{
1685  		stat = SAS_ABORTED_TASK;
1686  		if ((err_dw0 & NO_DEST) || err_dw1 & bit(31)) {
1687  			struct ssp_response_iu *iu = slot->response +
1688  				sizeof(struct mvs_err_info);
1689  			mvs_fill_ssp_resp_iu(iu, NOT_READY, 0x04, 0x01);
1690  			sas_ssp_task_response(mvi->dev, task, iu);
1691  			stat = SAM_STAT_CHECK_CONDITION;
1692  		}
1693  		if (err_dw1 & bit(31))
1694  			mv_printk("reuse same slot, retry command.\n");
1695  		break;
1696  	}
1697  	case SAS_PROTOCOL_SMP:
1698  		stat = SAM_STAT_CHECK_CONDITION;
1699  		break;
1700  
1701  	case SAS_PROTOCOL_SATA:
1702  	case SAS_PROTOCOL_STP:
1703  	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
1704  	{
1705  		task->ata_task.use_ncq = 0;
1706  		stat = SAS_PROTO_RESPONSE;
1707  		mvs_sata_done(mvi, task, slot_idx, err_dw0);
1708  	}
1709  		break;
1710  	default:
1711  		break;
1712  	}
1713  
1714  	return stat;
1715  }
1716  
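/*
 * mvs_slot_complete(): complete the command occupying the RX-descriptor
 * slot.  Samples the ABORTED flag under task_state_lock, then routes the
 * completion by protocol (SSP response frame, SMP response copy, SATA
 * D2H FIS), or through mvs_slot_err() when an error record is present.
 * mvi->lock is dropped around task->task_done(), presumably because the
 * completion callback can re-enter the driver.
 */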
int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct mvs_device *mvi_dev = NULL;
	struct task_status_struct *tstat;
	struct domain_device *dev;
	u32 aborted;

	void *to;
	enum exec_status sts;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return -1;

	tstat = &task->task_status;
	dev = task->dev;
	mvi_dev = dev->lldd_dev;

	spin_lock(&task->task_state_lock);
	task->task_state_flags &=
		~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	/* race with the abort path: sample the ABORTED flag under the lock */
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	spin_unlock(&task->task_state_lock);

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(aborted)) {
		tstat->stat = SAS_ABORTED_TASK;
		if (mvi_dev && mvi_dev->running_req)
			mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto))
			mvs_free_reg_set(mvi, mvi_dev);

		mvs_slot_task_free(mvi, task, slot, slot_idx);
		return -1;
	}

	/* when no device is attached, complete the task by error handling */
	if (unlikely(!mvi_dev || flags)) {
		if (!mvi_dev)
			mv_dprintk("port has no device.\n");
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/*
	 * error info record present; slot->response is 32 bit aligned but may
	 * not be 64 bit aligned, so check for zero in two 32 bit reads
	 */
	if (unlikely((rx_desc & RXQ_ERR)
		     && (*((u32 *)slot->response)
			 || *(((u32 *)slot->response) + 1)))) {
		mv_dprintk("port %d slot %d rx_desc %X has error info "
			"%016llX.\n", slot->port->sas_port.id, slot_idx,
			 rx_desc, get_unaligned_le64(slot->response));
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		tstat->resp = SAS_TASK_COMPLETE;
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_STAT_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu = slot->response +
						sizeof(struct mvs_err_info);
			sas_ssp_task_response(mvi->dev, task, iu);
		} else
			tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;

	case SAS_PROTOCOL_SMP: {
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_STAT_GOOD;
			to = kmap_atomic(sg_page(sg_resp));
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
		break;

	default:
		tstat->stat = SAM_STAT_CHECK_CONDITION;
		break;
	}
	if (!slot->port->port_attached) {
		mv_dprintk("port %d has been removed.\n",
			slot->port->sas_port.id);
		tstat->stat = SAS_PHY_DOWN;
	}

out:
	if (mvi_dev && mvi_dev->running_req) {
		mvi_dev->running_req--;
		if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req)
			mvs_free_reg_set(mvi, mvi_dev);
	}
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	sts = tstat->stat;

	/* task_done() may re-enter the driver, so drop our lock around it */
	spin_unlock(&mvi->lock);
	if (task->task_done)
		task->task_done(task);
	spin_lock(&mvi->lock);

	return sts;
}

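/*
 * mvs_do_release_task(): force-complete every outstanding slot on the
 * port behind @phy_no (optionally only those belonging to @dev), e.g.
 * after a hot unplug.  Each slot is completed with flags set so that it
 * takes the SAS_PHY_DOWN path in mvs_slot_complete().
 */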
void mvs_do_release_task(struct mvs_info *mvi,
		int phy_no, struct domain_device *dev)
{
	u32 slot_idx;
	struct mvs_phy *phy;
	struct mvs_port *port;
	struct mvs_slot_info *slot, *slot2;

	phy = &mvi->phy[phy_no];
	port = phy->port;
	if (!port)
		return;
	/* drain the completion queue in case a request already finished */
	mvs_int_rx(mvi, false);

	list_for_each_entry_safe(slot, slot2, &port->list, entry) {
		struct sas_task *task;
		slot_idx = (u32) (slot - mvi->slot_info);
		task = slot->task;

		if (dev && task->dev != dev)
			continue;

		mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
			slot_idx, slot->slot_tag, task);
		MVS_CHIP_DISP->command_active(mvi, slot_idx);

		mvs_slot_complete(mvi, slot_idx, 1);
	}
}

void mvs_release_task(struct mvs_info *mvi,
		      struct domain_device *dev)
{
	int i, phyno[WIDE_PORT_MAX_PHY], num;
	num = mvs_find_dev_phyno(dev, phyno);
	for (i = 0; i < num; i++)
		mvs_do_release_task(mvi, phyno[i], dev);
}

static void mvs_phy_disconnected(struct mvs_phy *phy)
{
	phy->phy_attached = 0;
	phy->att_dev_info = 0;
	phy->att_dev_sas_addr = 0;
}

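/*
 * mvs_work_queue(): delayed-work handler for deferred phy events.  For a
 * PHY_PLUG_EVENT it re-reads the phy state to decide between reporting
 * loss of signal and re-discovering the attached device; for EXP_BRCT_CHG
 * it forwards a broadcast-change to libsas to trigger revalidation.
 */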
static void mvs_work_queue(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
	struct mvs_info *mvi = mwq->mvi;
	unsigned long flags;
	u32 phy_no = (unsigned long) mwq->data;
	struct sas_ha_struct *sas_ha = mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	spin_lock_irqsave(&mvi->lock, flags);
	if (mwq->handler & PHY_PLUG_EVENT) {
		if (phy->phy_event & PHY_PLUG_OUT) {
			u32 tmp;
			struct sas_identify_frame *id;
			id = (struct sas_identify_frame *)phy->frame_rcvd;
			tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
			phy->phy_event &= ~PHY_PLUG_OUT;
			if (!(tmp & PHY_READY_MASK)) {
				sas_phy_disconnected(sas_phy);
				mvs_phy_disconnected(phy);
				sas_ha->notify_phy_event(sas_phy,
					PHYE_LOSS_OF_SIGNAL);
				mv_dprintk("phy%d Removed Device\n", phy_no);
			} else {
				MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
				mvs_update_phyinfo(mvi, phy_no, 1);
				mvs_bytes_dmaed(mvi, phy_no);
				mvs_port_notify_formed(sas_phy, 0);
				mv_dprintk("phy%d Attached Device\n", phy_no);
			}
		}
	} else if (mwq->handler & EXP_BRCT_CHG) {
		phy->phy_event &= ~EXP_BRCT_CHG;
		sas_ha->notify_port_event(sas_phy,
				PORTE_BROADCAST_RCVD);
		mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
	}
	list_del(&mwq->entry);
	spin_unlock_irqrestore(&mvi->lock, flags);
	kfree(mwq);
}

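/*
 * mvs_handle_event(): queue a phy event for deferred handling in
 * mvs_work_queue(), delayed by two seconds (HZ * 2), presumably to give
 * the link time to settle before acting on the event.
 */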
static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
{
	struct mvs_wq *mwq;
	int ret = 0;

	mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
	if (mwq) {
		mwq->mvi = mvi;
		mwq->data = data;
		mwq->handler = handler;
		MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
		list_add_tail(&mwq->entry, &mvi->wq_list);
		schedule_delayed_work(&mwq->work_q, HZ * 2);
	} else
		ret = -ENOMEM;

	return ret;
}

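/*
 * mvs_sig_time_out(): timer callback armed in mvs_int_port() after
 * COMWAKE; if no signature FIS arrived within five seconds, hard-reset
 * the phy that owns the timer.
 */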
static void mvs_sig_time_out(struct timer_list *t)
{
	struct mvs_phy *phy = from_timer(phy, t, timer);
	struct mvs_info *mvi = phy->mvi;
	u8 phy_no;

	for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
		if (&mvi->phy[phy_no] == phy) {
			mv_dprintk("Get signature timeout, reset phy %d\n",
				phy_no+mvi->id*mvi->chip->n_phy);
			MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET);
		}
	}
}

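/*
 * mvs_int_port(): per-phy port interrupt handler.  Decodes the phy IRQ
 * status and handles device unplug (PHYEV_POOF), SATA COMWAKE (arms the
 * signature-FIS timeout timer), plug-in/identify completion
 * (PHYEV_SIG_FIS/PHYEV_ID_DONE), and expander broadcast changes.
 */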
void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[phy_no];

	phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
	MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
	mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy,
		MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
	mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy,
		phy->irq_status);

	/*
	 * events are port events now; we need to check the interrupt
	 * status that belongs to each individual port.
	 */

	if (phy->irq_status & PHYEV_DCDR_ERR) {
		mv_dprintk("phy %d STP decoding error.\n",
		phy_no + mvi->id*mvi->chip->n_phy);
	}

	if (phy->irq_status & PHYEV_POOF) {
		mdelay(500);
		if (!(phy->phy_event & PHY_PLUG_OUT)) {
			int dev_sata = phy->phy_type & PORT_TYPE_SATA;
			int ready;
			mvs_do_release_task(mvi, phy_no, NULL);
			phy->phy_event |= PHY_PLUG_OUT;
			MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1);
			mvs_handle_event(mvi,
				(void *)(unsigned long)phy_no,
				PHY_PLUG_EVENT);
			ready = mvs_is_phy_ready(mvi, phy_no);
			if (ready || dev_sata) {
				if (MVS_CHIP_DISP->stp_reset)
					MVS_CHIP_DISP->stp_reset(mvi,
							phy_no);
				else
					MVS_CHIP_DISP->phy_reset(mvi,
							phy_no, MVS_SOFT_RESET);
				return;
			}
		}
	}

	if (phy->irq_status & PHYEV_COMWAKE) {
		tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
		MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		if (phy->timer.function == NULL) {
			phy->timer.function = mvs_sig_time_out;
			phy->timer.expires = jiffies + 5*HZ;
			add_timer(&phy->timer);
		}
	}
	if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
		phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
		mv_dprintk("notify plug in on phy[%d]\n", phy_no);
		if (phy->phy_status) {
			mdelay(10);
			MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
			if (phy->phy_type & PORT_TYPE_SATA) {
				tmp = MVS_CHIP_DISP->read_port_irq_mask(
						mvi, phy_no);
				tmp &= ~PHYEV_SIG_FIS;
				MVS_CHIP_DISP->write_port_irq_mask(mvi,
							phy_no, tmp);
			}
			mvs_update_phyinfo(mvi, phy_no, 0);
			if (phy->phy_type & PORT_TYPE_SAS) {
				MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE);
				mdelay(10);
			}

			mvs_bytes_dmaed(mvi, phy_no);
			/* if a hot plug is in progress, form the port now */
			if (phy->phy_event & PHY_PLUG_OUT) {
				mvs_port_notify_formed(&phy->sas_phy, 0);
				phy->phy_event &= ~PHY_PLUG_OUT;
			}
		} else {
			mv_dprintk("plug-in interrupt but phy%d is gone\n",
				phy_no + mvi->id*mvi->chip->n_phy);
		}
	} else if (phy->irq_status & PHYEV_BROAD_CH) {
		mv_dprintk("phy %d broadcast change.\n",
			phy_no + mvi->id*mvi->chip->n_phy);
		mvs_handle_event(mvi, (void *)(unsigned long)phy_no,
				EXP_BRCT_CHG);
	}
}

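/*
 * mvs_int_rx(): drain the RX (completion) ring.  The layout below is a
 * sketch inferred from the code in this function: the hardware DMA-writes
 * a mirror of its producer index into the first dword, so descriptor i
 * lives one dword further in:
 *
 *	mvi->rx[0]	mirror of the hardware RX producer index
 *	mvi->rx[1 + i]	completion descriptor for ring entry i
 *
 * Each descriptor is checked against RXQ_DONE/RXQ_ATTN/RXQ_ERR/
 * RXQ_SLOT_RESET and dispatched to mvs_slot_complete() or
 * mvs_slot_free(); attention bits are folded into one int_full() call.
 */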
int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	u32 rx_prod_idx, rx_desc;
	bool attn = false;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The completion entry may arrive late: re-read the producer
	 * index from the register and try again.  Note that if interrupt
	 * coalescing is enabled, the register must be read every time.
	 */
	if (unlikely(mvi->rx_cons == rx_prod_idx))
		mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		MVS_CHIP_DISP->int_full(mvi);
	return 0;
}

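/*
 * mvs_gpio_write(): SGPIO pass-through from libsas.  Delegates to the
 * chip dispatch table when the chip provides a gpio_write() method;
 * otherwise the operation is reported as unsupported.
 */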
int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index,
			u8 reg_count, u8 *write_data)
{
	struct mvs_prv_info *mvs_prv = sha->lldd_ha;
	/* mvi is referenced implicitly through the MVS_CHIP_DISP macro */
	struct mvs_info *mvi = mvs_prv->mvi[0];

	if (MVS_CHIP_DISP->gpio_write) {
		return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type,
			reg_index, reg_count, write_data);
	}

	return -ENOSYS;
}