1 /*
2 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
3 *
4 * Copyright (c) 2008-2009 USI Co., Ltd.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
14 * substantially similar to the "NO WARRANTY" disclaimer below
15 * ("Disclaimer") and any redistribution must be conditioned upon
16 * including a substantially similar Disclaimer requirement for further
17 * binary redistribution.
18 * 3. Neither the names of the above-listed copyright holders nor the names
19 * of any contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
21 *
22 * Alternatively, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") version 2 as published by the Free
24 * Software Foundation.
25 *
26 * NO WARRANTY
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
32 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
33 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
34 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
35 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
36 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGES.
38 *
39 */
40
41 #include <linux/slab.h>
42 #include "pm8001_sas.h"
43
/**
 * pm8001_find_tag - find the tag associated with a given sas task
 * @task: the task sent to the LLDD
 * @tag: output; the tag associated with the task, if one is found
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
50 {
51 if (task->lldd_task) {
52 struct pm8001_ccb_info *ccb;
53 ccb = task->lldd_task;
54 *tag = ccb->ccb_tag;
55 return 1;
56 }
57 return 0;
58 }
59
/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to free
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
66 {
67 void *bitmap = pm8001_ha->tags;
68 clear_bit(tag, bitmap);
69 }
70
/**
 * pm8001_tag_alloc - allocate an empty tag for a task
 * @pm8001_ha: our hba struct
 * @tag_out: output; the allocated tag
 */
inline int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
77 {
78 unsigned int tag;
79 void *bitmap = pm8001_ha->tags;
80 unsigned long flags;
81
82 spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
83 tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
84 if (tag >= pm8001_ha->tags_num) {
85 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
86 return -SAS_QUEUE_FULL;
87 }
88 set_bit(tag, bitmap);
89 spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
90 *tag_out = tag;
91 return 0;
92 }
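
/*
 * A minimal usage sketch for the tag/ccb pairing (illustrative only, not
 * part of the driver): the tag returned by pm8001_tag_alloc() doubles as
 * the index into the pre-allocated ccb_info[] array, and pm8001_tag_free()
 * returns it to the bitmap once the ccb is done with.  All names below are
 * taken from this file; error handling is abbreviated.
 *
 *	int rc;
 *	u32 tag;
 *	struct pm8001_ccb_info *ccb;
 *
 *	rc = pm8001_tag_alloc(pm8001_ha, &tag);
 *	if (rc)
 *		return rc;
 *	ccb = &pm8001_ha->ccb_info[tag];
 *	ccb->ccb_tag = tag;
 *	... issue the request ...
 *	pm8001_tag_free(pm8001_ha, tag);
 */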
93
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
95 {
96 int i;
97 for (i = 0; i < pm8001_ha->tags_num; ++i)
98 pm8001_tag_free(pm8001_ha, i);
99 }
100
/**
 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: output; the (aligned) virtual address of the allocation.
 * @pphys_addr: output; the physical address of the allocation.
 * @pphys_addr_hi: output; the upper 32 bits of the aligned physical address.
 * @pphys_addr_lo: output; the lower 32 bits of the aligned physical address.
 * @mem_size: memory size.
 * @align: required alignment in bytes (expected to be a power of two).
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
110 dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
111 u32 *pphys_addr_lo, u32 mem_size, u32 align)
112 {
113 caddr_t mem_virt_alloc;
114 dma_addr_t mem_dma_handle;
115 u64 phys_align;
116 u64 align_offset = 0;
117 if (align)
118 align_offset = (dma_addr_t)align - 1;
119 mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
120 &mem_dma_handle);
121 if (!mem_virt_alloc) {
122 pm8001_printk("memory allocation error\n");
123 return -1;
124 }
125 *pphys_addr = mem_dma_handle;
126 phys_align = (*pphys_addr + align_offset) & ~align_offset;
127 *virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
128 *pphys_addr_hi = upper_32_bits(phys_align);
129 *pphys_addr_lo = lower_32_bits(phys_align);
130 return 0;
131 }
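
/*
 * A worked example of the alignment math above (illustrative only): with
 * align = 0x1000 and a returned DMA handle of, say, 0x1f1010, align_offset
 * is 0xfff, so phys_align = (0x1f1010 + 0xfff) & ~0xfff = 0x1f2000, and the
 * virtual address handed back is advanced by the same 0xff0 bytes.  The
 * extra "align" bytes requested from pci_zalloc_consistent() are what keep
 * the rounded-up address inside the allocation; the mask trick assumes
 * align is a power of two.
 */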
/**
 * pm8001_find_ha_by_dev - find our hba struct from the domain device
 * provided by the sas layer.
 * @dev: the domain device which comes from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
139 {
140 struct sas_ha_struct *sha = dev->port->ha;
141 struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
142 return pm8001_ha;
143 }
144
/**
 * pm8001_phy_control - the phy control handler registered in
 * sas_domain_function_template for libsas to use.  Note that this only
 * controls HBA phys; to control an expander phy, use an SMP command
 * instead.
 * @sas_phy: which phy in HBA phys.
 * @func: the operation.
 * @funcdata: always NULL.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
155 void *funcdata)
156 {
157 int rc = 0, phy_id = sas_phy->id;
158 struct pm8001_hba_info *pm8001_ha = NULL;
159 struct sas_phy_linkrates *rates;
160 DECLARE_COMPLETION_ONSTACK(completion);
161 unsigned long flags;
162 pm8001_ha = sas_phy->ha->lldd_ha;
163 pm8001_ha->phy[phy_id].enable_completion = &completion;
164 switch (func) {
165 case PHY_FUNC_SET_LINK_RATE:
166 rates = funcdata;
167 if (rates->minimum_linkrate) {
168 pm8001_ha->phy[phy_id].minimum_linkrate =
169 rates->minimum_linkrate;
170 }
171 if (rates->maximum_linkrate) {
172 pm8001_ha->phy[phy_id].maximum_linkrate =
173 rates->maximum_linkrate;
174 }
175 if (pm8001_ha->phy[phy_id].phy_state == 0) {
176 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
177 wait_for_completion(&completion);
178 }
179 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
180 PHY_LINK_RESET);
181 break;
182 case PHY_FUNC_HARD_RESET:
183 if (pm8001_ha->phy[phy_id].phy_state == 0) {
184 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
185 wait_for_completion(&completion);
186 }
187 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
188 PHY_HARD_RESET);
189 break;
190 case PHY_FUNC_LINK_RESET:
191 if (pm8001_ha->phy[phy_id].phy_state == 0) {
192 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
193 wait_for_completion(&completion);
194 }
195 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
196 PHY_LINK_RESET);
197 break;
198 case PHY_FUNC_RELEASE_SPINUP_HOLD:
199 PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
200 PHY_LINK_RESET);
201 break;
202 case PHY_FUNC_DISABLE:
203 PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
204 break;
205 case PHY_FUNC_GET_EVENTS:
206 spin_lock_irqsave(&pm8001_ha->lock, flags);
207 if (pm8001_ha->chip_id == chip_8001) {
208 if (-1 == pm8001_bar4_shift(pm8001_ha,
209 (phy_id < 4) ? 0x30000 : 0x40000)) {
210 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
211 return -EINVAL;
212 }
213 }
214 {
215 struct sas_phy *phy = sas_phy->phy;
216 uint32_t *qp = (uint32_t *)(((char *)
217 pm8001_ha->io_mem[2].memvirtaddr)
218 + 0x1034 + (0x4000 * (phy_id & 3)));
219
220 phy->invalid_dword_count = qp[0];
221 phy->running_disparity_error_count = qp[1];
222 phy->loss_of_dword_sync_count = qp[3];
223 phy->phy_reset_problem_count = qp[4];
224 }
225 if (pm8001_ha->chip_id == chip_8001)
226 pm8001_bar4_shift(pm8001_ha, 0);
227 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
228 return 0;
229 default:
230 rc = -EOPNOTSUPP;
231 }
232 msleep(300);
233 return rc;
234 }
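
/*
 * For context, a sketch of how this handler and the other exported entry
 * points in this file are wired into libsas.  The real dispatch table
 * lives in pm8001_init.c, so treat the abbreviated field list below as
 * illustrative rather than authoritative.
 *
 *	static struct sas_domain_function_template pm8001_transport_ops = {
 *		.lldd_dev_found		= pm8001_dev_found,
 *		.lldd_dev_gone		= pm8001_dev_gone,
 *		.lldd_execute_task	= pm8001_queue_command,
 *		.lldd_control_phy	= pm8001_phy_control,
 *		.lldd_abort_task	= pm8001_abort_task,
 *		.lldd_lu_reset		= pm8001_lu_reset,
 *		.lldd_query_task	= pm8001_query_task,
 *	};
 */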
235
/**
 * pm8001_scan_start - enable all HBA phys by sending phy_start commands
 * to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
242 {
243 int i;
244 struct pm8001_hba_info *pm8001_ha;
245 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
246 pm8001_ha = sha->lldd_ha;
247 /* SAS_RE_INITIALIZATION not available in SPCv/ve */
248 if (pm8001_ha->chip_id == chip_8001)
249 PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
250 for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
251 PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
252 }
253
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
255 {
256 struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
257
258 /* give the phy enabling interrupt event time to come in (1s
259 * is empirically about all it takes) */
260 if (time < HZ)
261 return 0;
262 /* Wait for discovery to finish */
263 sas_drain_work(ha);
264 return 1;
265 }
266
/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
273 struct pm8001_ccb_info *ccb)
274 {
275 return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
276 }
277
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
279 {
280 struct ata_queued_cmd *qc = task->uldd_task;
281 if (qc) {
282 if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
283 qc->tf.command == ATA_CMD_FPDMA_READ ||
284 qc->tf.command == ATA_CMD_FPDMA_RECV ||
285 qc->tf.command == ATA_CMD_FPDMA_SEND ||
286 qc->tf.command == ATA_CMD_NCQ_NON_DATA) {
287 *tag = qc->tag;
288 return 1;
289 }
290 }
291 return 0;
292 }
293
/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
300 struct pm8001_ccb_info *ccb)
301 {
302 return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
303 }
304
/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM request
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
312 struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf)
313 {
314 return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
315 }
316
/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
323 struct pm8001_ccb_info *ccb)
324 {
325 return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
326 }
327
328 /* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
330 {
331 struct domain_device *pdev = dev->parent;
332
333 /* Directly attached device */
334 if (!pdev)
335 return dev->port->id;
336 while (pdev) {
337 struct domain_device *pdev_p = pdev->parent;
338 if (!pdev_p)
339 return pdev->port->id;
340 pdev = pdev->parent;
341 }
342 return 0;
343 }
344
/**
 * pm8001_task_exec - queue a task (ssp, smp or sata) to the hardware.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags.
 * @is_tmf: whether this is a task management request.
 * @tmf: the task management IU.
 */
354 #define DEV_IS_GONE(pm8001_dev) \
355 ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))
static int pm8001_task_exec(struct sas_task *task,
357 gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf)
358 {
359 struct domain_device *dev = task->dev;
360 struct pm8001_hba_info *pm8001_ha;
361 struct pm8001_device *pm8001_dev;
362 struct pm8001_port *port = NULL;
363 struct sas_task *t = task;
364 struct pm8001_ccb_info *ccb;
365 u32 tag = 0xdeadbeef, rc, n_elem = 0;
366 unsigned long flags = 0;
367
368 if (!dev->port) {
369 struct task_status_struct *tsm = &t->task_status;
370 tsm->resp = SAS_TASK_UNDELIVERED;
371 tsm->stat = SAS_PHY_DOWN;
372 if (dev->dev_type != SAS_SATA_DEV)
373 t->task_done(t);
374 return 0;
375 }
376 pm8001_ha = pm8001_find_ha_by_dev(task->dev);
377 PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
378 spin_lock_irqsave(&pm8001_ha->lock, flags);
379 do {
380 dev = t->dev;
381 pm8001_dev = dev->lldd_dev;
382 port = &pm8001_ha->port[sas_find_local_port_id(dev)];
383 if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) {
384 if (sas_protocol_ata(t->task_proto)) {
385 struct task_status_struct *ts = &t->task_status;
386 ts->resp = SAS_TASK_UNDELIVERED;
387 ts->stat = SAS_PHY_DOWN;
388
389 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
390 t->task_done(t);
391 spin_lock_irqsave(&pm8001_ha->lock, flags);
392 continue;
393 } else {
394 struct task_status_struct *ts = &t->task_status;
395 ts->resp = SAS_TASK_UNDELIVERED;
396 ts->stat = SAS_PHY_DOWN;
397 t->task_done(t);
398 continue;
399 }
400 }
401 rc = pm8001_tag_alloc(pm8001_ha, &tag);
402 if (rc)
403 goto err_out;
404 ccb = &pm8001_ha->ccb_info[tag];
405
406 if (!sas_protocol_ata(t->task_proto)) {
407 if (t->num_scatter) {
408 n_elem = dma_map_sg(pm8001_ha->dev,
409 t->scatter,
410 t->num_scatter,
411 t->data_dir);
412 if (!n_elem) {
413 rc = -ENOMEM;
414 goto err_out_tag;
415 }
416 }
417 } else {
418 n_elem = t->num_scatter;
419 }
420
421 t->lldd_task = ccb;
422 ccb->n_elem = n_elem;
423 ccb->ccb_tag = tag;
424 ccb->task = t;
425 ccb->device = pm8001_dev;
426 switch (t->task_proto) {
427 case SAS_PROTOCOL_SMP:
428 rc = pm8001_task_prep_smp(pm8001_ha, ccb);
429 break;
430 case SAS_PROTOCOL_SSP:
431 if (is_tmf)
432 rc = pm8001_task_prep_ssp_tm(pm8001_ha,
433 ccb, tmf);
434 else
435 rc = pm8001_task_prep_ssp(pm8001_ha, ccb);
436 break;
437 case SAS_PROTOCOL_SATA:
438 case SAS_PROTOCOL_STP:
439 rc = pm8001_task_prep_ata(pm8001_ha, ccb);
440 break;
441 default:
442 dev_printk(KERN_ERR, pm8001_ha->dev,
443 "unknown sas_task proto: 0x%x\n",
444 t->task_proto);
445 rc = -EINVAL;
446 break;
447 }
448
449 if (rc) {
450 PM8001_IO_DBG(pm8001_ha,
451 pm8001_printk("rc is %x\n", rc));
452 goto err_out_tag;
453 }
454 /* TODO: select normal or high priority */
455 spin_lock(&t->task_state_lock);
456 t->task_state_flags |= SAS_TASK_AT_INITIATOR;
457 spin_unlock(&t->task_state_lock);
458 pm8001_dev->running_req++;
459 } while (0);
460 rc = 0;
461 goto out_done;
462
463 err_out_tag:
464 pm8001_tag_free(pm8001_ha, tag);
465 err_out:
466 dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
467 if (!sas_protocol_ata(t->task_proto))
468 if (n_elem)
469 dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
470 t->data_dir);
471 out_done:
472 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
473 return rc;
474 }
475
/**
 * pm8001_queue_command - the task execution entry registered for the upper
 * layer; all IO commands sent to the HBA go through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
483 {
484 return pm8001_task_exec(task, gfp_flags, 0, NULL);
485 }
486
/**
 * pm8001_ccb_task_free - free the sg for an ssp or smp command, free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the task
 * @task: the task to be freed.
 * @ccb_idx: ccb index.
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
495 struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
496 {
497 if (!ccb->task)
498 return;
499 if (!sas_protocol_ata(task->task_proto))
500 if (ccb->n_elem)
501 dma_unmap_sg(pm8001_ha->dev, task->scatter,
502 task->num_scatter, task->data_dir);
503
504 switch (task->task_proto) {
505 case SAS_PROTOCOL_SMP:
506 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
507 PCI_DMA_FROMDEVICE);
508 dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
509 PCI_DMA_TODEVICE);
510 break;
511
512 case SAS_PROTOCOL_SATA:
513 case SAS_PROTOCOL_STP:
514 case SAS_PROTOCOL_SSP:
515 default:
516 /* do nothing */
517 break;
518 }
519 task->lldd_task = NULL;
520 ccb->task = NULL;
521 ccb->ccb_tag = 0xFFFFFFFF;
522 ccb->open_retry = 0;
523 pm8001_tag_free(pm8001_ha, ccb_idx);
524 }
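
/*
 * A hedged sketch of the typical completion-side usage (the real callers
 * live in the chip-specific completion handlers, so take this as an
 * outline rather than a verbatim copy): the completion path looks the ccb
 * up by tag, fills in the task status, releases the ccb/tag pair with this
 * helper and only then completes the task towards libsas, with a memory
 * barrier in between as pm8001_open_reject_retry() below does.
 *
 *	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
 *	struct sas_task *t = ccb->task;
 *
 *	t->task_status.resp = SAS_TASK_COMPLETE;
 *	t->task_status.stat = SAM_STAT_GOOD;
 *	pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
 *	mb();
 *	t->task_done(t);
 */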
525
/**
 * pm8001_alloc_dev - find an empty pm8001_device slot
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
531 {
532 u32 dev;
533 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
534 if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
535 pm8001_ha->devices[dev].id = dev;
536 return &pm8001_ha->devices[dev];
537 }
538 }
539 if (dev == PM8001_MAX_DEVICES) {
540 PM8001_FAIL_DBG(pm8001_ha,
541 pm8001_printk("max support %d devices, ignore ..\n",
542 PM8001_MAX_DEVICES));
543 }
544 return NULL;
545 }
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: the device ID assigned by the HBA firmware
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
551 u32 device_id)
552 {
553 u32 dev;
554 for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
555 if (pm8001_ha->devices[dev].device_id == device_id)
556 return &pm8001_ha->devices[dev];
557 }
558 if (dev == PM8001_MAX_DEVICES) {
559 PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING "
560 "DEVICE FOUND !!!\n"));
561 }
562 return NULL;
563 }
564
static void pm8001_free_dev(struct pm8001_device *pm8001_dev)
566 {
567 u32 id = pm8001_dev->id;
568 memset(pm8001_dev, 0, sizeof(*pm8001_dev));
569 pm8001_dev->id = id;
570 pm8001_dev->dev_type = SAS_PHY_UNUSED;
571 pm8001_dev->device_id = PM8001_MAX_DEVICES;
572 pm8001_dev->sas_device = NULL;
573 }
574
/**
 * pm8001_dev_found_notify - libsas notifies us that a device has been found.
 * @dev: the device structure which the sas layer uses.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * has been found; the LLDD then registers the device with the HBA firmware
 * via the "OPC_INB_REG_DEV" command.  The HBA assigns a device ID (based on
 * the device's sas address) and returns it to the LLDD.  From then on we
 * communicate with the HBA firmware using that device ID rather than the
 * sas address.  This step is required for this HBA, though it may be
 * optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
588 {
589 unsigned long flags = 0;
590 int res = 0;
591 struct pm8001_hba_info *pm8001_ha = NULL;
592 struct domain_device *parent_dev = dev->parent;
593 struct pm8001_device *pm8001_device;
594 DECLARE_COMPLETION_ONSTACK(completion);
595 u32 flag = 0;
596 pm8001_ha = pm8001_find_ha_by_dev(dev);
597 spin_lock_irqsave(&pm8001_ha->lock, flags);
598
599 pm8001_device = pm8001_alloc_dev(pm8001_ha);
600 if (!pm8001_device) {
601 res = -1;
602 goto found_out;
603 }
604 pm8001_device->sas_device = dev;
605 dev->lldd_dev = pm8001_device;
606 pm8001_device->dev_type = dev->dev_type;
607 pm8001_device->dcompletion = &completion;
608 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
609 int phy_id;
610 struct ex_phy *phy;
611 for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
612 phy_id++) {
613 phy = &parent_dev->ex_dev.ex_phy[phy_id];
614 if (SAS_ADDR(phy->attached_sas_addr)
615 == SAS_ADDR(dev->sas_addr)) {
616 pm8001_device->attached_phy = phy_id;
617 break;
618 }
619 }
620 if (phy_id == parent_dev->ex_dev.num_phys) {
621 PM8001_FAIL_DBG(pm8001_ha,
622 pm8001_printk("Error: no attached dev:%016llx"
623 " at ex:%016llx.\n", SAS_ADDR(dev->sas_addr),
624 SAS_ADDR(parent_dev->sas_addr)));
625 res = -1;
626 }
627 } else {
628 if (dev->dev_type == SAS_SATA_DEV) {
629 pm8001_device->attached_phy =
630 dev->rphy->identify.phy_identifier;
631 flag = 1; /* directly sata*/
632 }
633 } /*register this device to HBA*/
634 PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n"));
635 PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
636 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
637 wait_for_completion(&completion);
638 if (dev->dev_type == SAS_END_DEVICE)
639 msleep(50);
640 pm8001_ha->flags = PM8001F_RUN_TIME;
641 return 0;
642 found_out:
643 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
644 return res;
645 }
646
int pm8001_dev_found(struct domain_device *dev)
648 {
649 return pm8001_dev_found_notify(dev);
650 }
651
void pm8001_task_done(struct sas_task *task)
653 {
654 if (!del_timer(&task->slow_task->timer))
655 return;
656 complete(&task->slow_task->completion);
657 }
658
static void pm8001_tmf_timedout(struct timer_list *t)
660 {
661 struct sas_task_slow *slow = from_timer(slow, t, timer);
662 struct sas_task *task = slow->task;
663
664 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
665 complete(&task->slow_task->completion);
666 }
667
668 #define PM8001_TASK_TIMEOUT 20
/**
 * pm8001_exec_internal_tmf_task - execute some task management commands.
 * @dev: the wanted device.
 * @tmf: the task management request to issue.
 * @para_len: length of @parameter.
 * @parameter: ssp task parameter.
 *
 * When an error or exception happens we may want to react, for example by
 * aborting the task that caused the exception; that is done by calling this
 * function, which goes through the normal task execution interface.
 */
static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
681 void *parameter, u32 para_len, struct pm8001_tmf_task *tmf)
682 {
683 int res, retry;
684 struct sas_task *task = NULL;
685 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
686 struct pm8001_device *pm8001_dev = dev->lldd_dev;
687 DECLARE_COMPLETION_ONSTACK(completion_setstate);
688
689 for (retry = 0; retry < 3; retry++) {
690 task = sas_alloc_slow_task(GFP_KERNEL);
691 if (!task)
692 return -ENOMEM;
693
694 task->dev = dev;
695 task->task_proto = dev->tproto;
696 memcpy(&task->ssp_task, parameter, para_len);
697 task->task_done = pm8001_task_done;
698 task->slow_task->timer.function = pm8001_tmf_timedout;
699 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
700 add_timer(&task->slow_task->timer);
701
702 res = pm8001_task_exec(task, GFP_KERNEL, 1, tmf);
703
704 if (res) {
705 del_timer(&task->slow_task->timer);
706 PM8001_FAIL_DBG(pm8001_ha,
707 pm8001_printk("Executing internal task "
708 "failed\n"));
709 goto ex_err;
710 }
711 wait_for_completion(&task->slow_task->completion);
712 if (pm8001_ha->chip_id != chip_8001) {
713 pm8001_dev->setds_completion = &completion_setstate;
714 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
715 pm8001_dev, 0x01);
716 wait_for_completion(&completion_setstate);
717 }
718 res = -TMF_RESP_FUNC_FAILED;
719 /* Even TMF timed out, return direct. */
720 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
721 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
722 PM8001_FAIL_DBG(pm8001_ha,
723 pm8001_printk("TMF task[%x]timeout.\n",
724 tmf->tmf));
725 goto ex_err;
726 }
727 }
728
729 if (task->task_status.resp == SAS_TASK_COMPLETE &&
730 task->task_status.stat == SAM_STAT_GOOD) {
731 res = TMF_RESP_FUNC_COMPLETE;
732 break;
733 }
734
735 if (task->task_status.resp == SAS_TASK_COMPLETE &&
736 task->task_status.stat == SAS_DATA_UNDERRUN) {
737 /* no error, but return the number of bytes of
738 * underrun */
739 res = task->task_status.residual;
740 break;
741 }
742
743 if (task->task_status.resp == SAS_TASK_COMPLETE &&
744 task->task_status.stat == SAS_DATA_OVERRUN) {
745 PM8001_FAIL_DBG(pm8001_ha,
746 pm8001_printk("Blocked task error.\n"));
747 res = -EMSGSIZE;
748 break;
749 } else {
750 PM8001_EH_DBG(pm8001_ha,
751 pm8001_printk(" Task to dev %016llx response:"
752 "0x%x status 0x%x\n",
753 SAS_ADDR(dev->sas_addr),
754 task->task_status.resp,
755 task->task_status.stat));
756 sas_free_task(task);
757 task = NULL;
758 }
759 }
760 ex_err:
761 BUG_ON(retry == 3 && task != NULL);
762 sas_free_task(task);
763 return res;
764 }
765
766 static int
pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
768 struct pm8001_device *pm8001_dev, struct domain_device *dev, u32 flag,
769 u32 task_tag)
770 {
771 int res, retry;
772 u32 ccb_tag;
773 struct pm8001_ccb_info *ccb;
774 struct sas_task *task = NULL;
775
776 for (retry = 0; retry < 3; retry++) {
777 task = sas_alloc_slow_task(GFP_KERNEL);
778 if (!task)
779 return -ENOMEM;
780
781 task->dev = dev;
782 task->task_proto = dev->tproto;
783 task->task_done = pm8001_task_done;
784 task->slow_task->timer.function = pm8001_tmf_timedout;
785 task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
786 add_timer(&task->slow_task->timer);
787
788 res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
789 if (res)
790 return res;
791 ccb = &pm8001_ha->ccb_info[ccb_tag];
792 ccb->device = pm8001_dev;
793 ccb->ccb_tag = ccb_tag;
794 ccb->task = task;
795 ccb->n_elem = 0;
796
797 res = PM8001_CHIP_DISP->task_abort(pm8001_ha,
798 pm8001_dev, flag, task_tag, ccb_tag);
799
800 if (res) {
801 del_timer(&task->slow_task->timer);
802 PM8001_FAIL_DBG(pm8001_ha,
803 pm8001_printk("Executing internal task "
804 "failed\n"));
805 goto ex_err;
806 }
807 wait_for_completion(&task->slow_task->completion);
808 res = TMF_RESP_FUNC_FAILED;
809 /* Even TMF timed out, return direct. */
810 if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
811 if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
812 PM8001_FAIL_DBG(pm8001_ha,
813 pm8001_printk("TMF task timeout.\n"));
814 goto ex_err;
815 }
816 }
817
818 if (task->task_status.resp == SAS_TASK_COMPLETE &&
819 task->task_status.stat == SAM_STAT_GOOD) {
820 res = TMF_RESP_FUNC_COMPLETE;
821 break;
822
823 } else {
824 PM8001_EH_DBG(pm8001_ha,
825 pm8001_printk(" Task to dev %016llx response: "
826 "0x%x status 0x%x\n",
827 SAS_ADDR(dev->sas_addr),
828 task->task_status.resp,
829 task->task_status.stat));
830 sas_free_task(task);
831 task = NULL;
832 }
833 }
834 ex_err:
835 BUG_ON(retry == 3 && task != NULL);
836 sas_free_task(task);
837 return res;
838 }
839
840 /**
841 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
842 * @dev: the device structure which sas layer used.
843 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
845 {
846 unsigned long flags = 0;
847 struct pm8001_hba_info *pm8001_ha;
848 struct pm8001_device *pm8001_dev = dev->lldd_dev;
849
850 pm8001_ha = pm8001_find_ha_by_dev(dev);
851 spin_lock_irqsave(&pm8001_ha->lock, flags);
852 if (pm8001_dev) {
853 u32 device_id = pm8001_dev->device_id;
854
855 PM8001_DISC_DBG(pm8001_ha,
856 pm8001_printk("found dev[%d:%x] is gone.\n",
857 pm8001_dev->device_id, pm8001_dev->dev_type));
858 if (pm8001_dev->running_req) {
859 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
860 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
861 dev, 1, 0);
862 spin_lock_irqsave(&pm8001_ha->lock, flags);
863 }
864 PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
865 pm8001_free_dev(pm8001_dev);
866 } else {
867 PM8001_DISC_DBG(pm8001_ha,
868 pm8001_printk("Found dev has gone.\n"));
869 }
870 dev->lldd_dev = NULL;
871 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
872 }
873
void pm8001_dev_gone(struct domain_device *dev)
875 {
876 pm8001_dev_gone_notify(dev);
877 }
878
static int pm8001_issue_ssp_tmf(struct domain_device *dev,
880 u8 *lun, struct pm8001_tmf_task *tmf)
881 {
882 struct sas_ssp_task ssp_task;
883 if (!(dev->tproto & SAS_PROTOCOL_SSP))
884 return TMF_RESP_FUNC_ESUPP;
885
886 strncpy((u8 *)&ssp_task.LUN, lun, 8);
887 return pm8001_exec_internal_tmf_task(dev, &ssp_task, sizeof(ssp_task),
888 tmf);
889 }
890
891 /* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
893 struct pm8001_hba_info *pm8001_ha,
894 struct sas_task *task_to_close,
895 struct pm8001_device *device_to_close)
896 {
897 int i;
898 unsigned long flags;
899
900 if (pm8001_ha == NULL)
901 return;
902
903 spin_lock_irqsave(&pm8001_ha->lock, flags);
904
905 for (i = 0; i < PM8001_MAX_CCB; i++) {
906 struct sas_task *task;
907 struct task_status_struct *ts;
908 struct pm8001_device *pm8001_dev;
909 unsigned long flags1;
910 u32 tag;
911 struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];
912
913 pm8001_dev = ccb->device;
914 if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
915 continue;
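		/*
		 * No specific device to close: only retry ccbs whose device
		 * pointer refers to a properly aligned slot inside this HA's
		 * devices[] array; the offset/modulo check below rejects
		 * anything else.
		 */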
916 if (!device_to_close) {
917 uintptr_t d = (uintptr_t)pm8001_dev
918 - (uintptr_t)&pm8001_ha->devices;
919 if (((d % sizeof(*pm8001_dev)) != 0)
920 || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
921 continue;
922 } else if (pm8001_dev != device_to_close)
923 continue;
924 tag = ccb->ccb_tag;
925 if (!tag || (tag == 0xFFFFFFFF))
926 continue;
927 task = ccb->task;
928 if (!task || !task->task_done)
929 continue;
930 if (task_to_close && (task != task_to_close))
931 continue;
932 ts = &task->task_status;
933 ts->resp = SAS_TASK_COMPLETE;
934 /* Force the midlayer to retry */
935 ts->stat = SAS_OPEN_REJECT;
936 ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
937 if (pm8001_dev)
938 pm8001_dev->running_req--;
939 spin_lock_irqsave(&task->task_state_lock, flags1);
940 task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
941 task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
942 task->task_state_flags |= SAS_TASK_STATE_DONE;
943 if (unlikely((task->task_state_flags
944 & SAS_TASK_STATE_ABORTED))) {
945 spin_unlock_irqrestore(&task->task_state_lock,
946 flags1);
947 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
948 } else {
949 spin_unlock_irqrestore(&task->task_state_lock,
950 flags1);
951 pm8001_ccb_task_free(pm8001_ha, task, ccb, tag);
952 mb();/* in order to force CPU ordering */
953 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
954 task->task_done(task);
955 spin_lock_irqsave(&pm8001_ha->lock, flags);
956 }
957 }
958
959 spin_unlock_irqrestore(&pm8001_ha->lock, flags);
960 }
961
/**
 * The standard mandates a link reset for ATA (type 0) and a hard reset for
 * SSP (type 1), only for RECOVERY.
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
967 {
968 int rc = TMF_RESP_FUNC_FAILED;
969 struct pm8001_device *pm8001_dev;
970 struct pm8001_hba_info *pm8001_ha;
971 struct sas_phy *phy;
972
973 if (!dev || !dev->lldd_dev)
974 return -ENODEV;
975
976 pm8001_dev = dev->lldd_dev;
977 pm8001_ha = pm8001_find_ha_by_dev(dev);
978 phy = sas_get_local_phy(dev);
979
980 if (dev_is_sata(dev)) {
981 if (scsi_is_sas_phy_local(phy)) {
982 rc = 0;
983 goto out;
984 }
985 rc = sas_phy_reset(phy, 1);
986 if (rc) {
987 PM8001_EH_DBG(pm8001_ha,
988 pm8001_printk("phy reset failed for device %x\n"
989 "with rc %d\n", pm8001_dev->device_id, rc));
990 rc = TMF_RESP_FUNC_FAILED;
991 goto out;
992 }
993 msleep(2000);
994 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
995 dev, 1, 0);
996 if (rc) {
997 PM8001_EH_DBG(pm8001_ha,
998 pm8001_printk("task abort failed %x\n"
999 "with rc %d\n", pm8001_dev->device_id, rc));
1000 rc = TMF_RESP_FUNC_FAILED;
1001 }
1002 } else {
1003 rc = sas_phy_reset(phy, 1);
1004 msleep(2000);
1005 }
1006 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
1007 pm8001_dev->device_id, rc));
1008 out:
1009 sas_put_local_phy(phy);
1010 return rc;
1011 }
1012
/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
1018 {
1019 int rc = TMF_RESP_FUNC_FAILED;
1020 struct pm8001_device *pm8001_dev;
1021 struct pm8001_hba_info *pm8001_ha;
1022 struct sas_phy *phy;
1023 u32 device_id = 0;
1024
1025 if (!dev || !dev->lldd_dev)
1026 return -1;
1027
1028 pm8001_dev = dev->lldd_dev;
1029 device_id = pm8001_dev->device_id;
1030 pm8001_ha = pm8001_find_ha_by_dev(dev);
1031
1032 PM8001_EH_DBG(pm8001_ha,
1033 pm8001_printk("I_T_Nexus handler invoked !!"));
1034
1035 phy = sas_get_local_phy(dev);
1036
1037 if (dev_is_sata(dev)) {
1038 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1039 if (scsi_is_sas_phy_local(phy)) {
1040 rc = 0;
1041 goto out;
1042 }
1043 /* send internal ssp/sata/smp abort command to FW */
1044 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1045 dev, 1, 0);
1046 msleep(100);
1047
1048 /* deregister the target device */
1049 pm8001_dev_gone_notify(dev);
1050 msleep(200);
1051
1052 /*send phy reset to hard reset target */
1053 rc = sas_phy_reset(phy, 1);
1054 msleep(2000);
1055 pm8001_dev->setds_completion = &completion_setstate;
1056
1057 wait_for_completion(&completion_setstate);
1058 } else {
1059 /* send internal ssp/sata/smp abort command to FW */
1060 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1061 dev, 1, 0);
1062 msleep(100);
1063
1064 /* deregister the target device */
1065 pm8001_dev_gone_notify(dev);
1066 msleep(200);
1067
1068 /*send phy reset to hard reset target */
1069 rc = sas_phy_reset(phy, 1);
1070 msleep(2000);
1071 }
1072 PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n",
1073 pm8001_dev->device_id, rc));
1074 out:
1075 sas_put_local_phy(phy);
1076
1077 return rc;
1078 }
/* mandatory SAM-3, the task resets the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
1081 {
1082 int rc = TMF_RESP_FUNC_FAILED;
1083 struct pm8001_tmf_task tmf_task;
1084 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1085 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1086 DECLARE_COMPLETION_ONSTACK(completion_setstate);
1087 if (dev_is_sata(dev)) {
1088 struct sas_phy *phy = sas_get_local_phy(dev);
1089 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev ,
1090 dev, 1, 0);
1091 rc = sas_phy_reset(phy, 1);
1092 sas_put_local_phy(phy);
1093 pm8001_dev->setds_completion = &completion_setstate;
1094 rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1095 pm8001_dev, 0x01);
1096 wait_for_completion(&completion_setstate);
1097 } else {
1098 tmf_task.tmf = TMF_LU_RESET;
1099 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1100 }
	/* If this failed, fall through to I_T_Nexus reset */
1102 PM8001_EH_DBG(pm8001_ha, pm8001_printk("for device[%x]:rc=%d\n",
1103 pm8001_dev->device_id, rc));
1104 return rc;
1105 }
1106
1107 /* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
1109 {
1110 u32 tag = 0xdeadbeef;
1111 int i = 0;
1112 struct scsi_lun lun;
1113 struct pm8001_tmf_task tmf_task;
1114 int rc = TMF_RESP_FUNC_FAILED;
1115 if (unlikely(!task || !task->lldd_task || !task->dev))
1116 return rc;
1117
1118 if (task->task_proto & SAS_PROTOCOL_SSP) {
1119 struct scsi_cmnd *cmnd = task->uldd_task;
1120 struct domain_device *dev = task->dev;
1121 struct pm8001_hba_info *pm8001_ha =
1122 pm8001_find_ha_by_dev(dev);
1123
1124 int_to_scsilun(cmnd->device->lun, &lun);
1125 rc = pm8001_find_tag(task, &tag);
1126 if (rc == 0) {
1127 rc = TMF_RESP_FUNC_FAILED;
1128 return rc;
1129 }
1130 PM8001_EH_DBG(pm8001_ha, pm8001_printk("Query:["));
1131 for (i = 0; i < 16; i++)
1132 printk(KERN_INFO "%02x ", cmnd->cmnd[i]);
1133 printk(KERN_INFO "]\n");
1134 tmf_task.tmf = TMF_QUERY_TASK;
1135 tmf_task.tag_of_task_to_be_managed = tag;
1136
1137 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1138 switch (rc) {
1139 /* The task is still in Lun, release it then */
1140 case TMF_RESP_FUNC_SUCC:
1141 PM8001_EH_DBG(pm8001_ha,
1142 pm8001_printk("The task is still in Lun\n"));
1143 break;
1144 /* The task is not in Lun or failed, reset the phy */
1145 case TMF_RESP_FUNC_FAILED:
1146 case TMF_RESP_FUNC_COMPLETE:
1147 PM8001_EH_DBG(pm8001_ha,
1148 pm8001_printk("The task is not in Lun or failed,"
1149 " reset the phy\n"));
1150 break;
1151 }
1152 }
1153 pm8001_printk(":rc= %d\n", rc);
1154 return rc;
1155 }
1156
/* mandatory SAM-3, still need to free task/ccb info, abort the specified task */
int pm8001_abort_task(struct sas_task *task)
1159 {
1160 unsigned long flags;
1161 u32 tag;
1162 u32 device_id;
1163 struct domain_device *dev ;
1164 struct pm8001_hba_info *pm8001_ha;
1165 struct scsi_lun lun;
1166 struct pm8001_device *pm8001_dev;
1167 struct pm8001_tmf_task tmf_task;
1168 int rc = TMF_RESP_FUNC_FAILED, ret;
1169 u32 phy_id;
1170 struct sas_task_slow slow_task;
1171 if (unlikely(!task || !task->lldd_task || !task->dev))
1172 return TMF_RESP_FUNC_FAILED;
1173 dev = task->dev;
1174 pm8001_dev = dev->lldd_dev;
1175 pm8001_ha = pm8001_find_ha_by_dev(dev);
1176 device_id = pm8001_dev->device_id;
1177 phy_id = pm8001_dev->attached_phy;
1178 rc = pm8001_find_tag(task, &tag);
1179 if (rc == 0) {
1180 pm8001_printk("no tag for task:%p\n", task);
1181 return TMF_RESP_FUNC_FAILED;
1182 }
1183 spin_lock_irqsave(&task->task_state_lock, flags);
1184 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
1185 spin_unlock_irqrestore(&task->task_state_lock, flags);
1186 return TMF_RESP_FUNC_COMPLETE;
1187 }
1188 task->task_state_flags |= SAS_TASK_STATE_ABORTED;
1189 if (task->slow_task == NULL) {
1190 init_completion(&slow_task.completion);
1191 task->slow_task = &slow_task;
1192 }
1193 spin_unlock_irqrestore(&task->task_state_lock, flags);
1194 if (task->task_proto & SAS_PROTOCOL_SSP) {
1195 struct scsi_cmnd *cmnd = task->uldd_task;
1196 int_to_scsilun(cmnd->device->lun, &lun);
1197 tmf_task.tmf = TMF_ABORT_TASK;
1198 tmf_task.tag_of_task_to_be_managed = tag;
1199 rc = pm8001_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
1200 pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1201 pm8001_dev->sas_device, 0, tag);
1202 } else if (task->task_proto & SAS_PROTOCOL_SATA ||
1203 task->task_proto & SAS_PROTOCOL_STP) {
1204 if (pm8001_ha->chip_id == chip_8006) {
1205 DECLARE_COMPLETION_ONSTACK(completion_reset);
1206 DECLARE_COMPLETION_ONSTACK(completion);
1207 struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
1208
1209 /* 1. Set Device state as Recovery */
1210 pm8001_dev->setds_completion = &completion;
1211 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1212 pm8001_dev, 0x03);
1213 wait_for_completion(&completion);
1214
1215 /* 2. Send Phy Control Hard Reset */
1216 reinit_completion(&completion);
1217 phy->reset_success = false;
1218 phy->enable_completion = &completion;
1219 phy->reset_completion = &completion_reset;
1220 ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
1221 PHY_HARD_RESET);
1222 if (ret)
1223 goto out;
1224 PM8001_MSG_DBG(pm8001_ha,
1225 pm8001_printk("Waiting for local phy ctl\n"));
1226 wait_for_completion(&completion);
1227 if (!phy->reset_success)
1228 goto out;
1229
1230 /* 3. Wait for Port Reset complete / Port reset TMO */
1231 PM8001_MSG_DBG(pm8001_ha,
1232 pm8001_printk("Waiting for Port reset\n"));
1233 wait_for_completion(&completion_reset);
1234 if (phy->port_reset_status)
1235 goto out;
1236
1237 /*
1238 * 4. SATA Abort ALL
1239 * we wait for the task to be aborted so that the task
1240 * is removed from the ccb. on success the caller is
1241 * going to free the task.
1242 */
1243 ret = pm8001_exec_internal_task_abort(pm8001_ha,
1244 pm8001_dev, pm8001_dev->sas_device, 1, tag);
1245 if (ret)
1246 goto out;
1247 ret = wait_for_completion_timeout(
1248 &task->slow_task->completion,
1249 PM8001_TASK_TIMEOUT * HZ);
1250 if (!ret)
1251 goto out;
1252
1253 /* 5. Set Device State as Operational */
1254 reinit_completion(&completion);
1255 pm8001_dev->setds_completion = &completion;
1256 PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
1257 pm8001_dev, 0x01);
1258 wait_for_completion(&completion);
1259 } else {
1260 rc = pm8001_exec_internal_task_abort(pm8001_ha,
1261 pm8001_dev, pm8001_dev->sas_device, 0, tag);
1262 }
1263 rc = TMF_RESP_FUNC_COMPLETE;
1264 } else if (task->task_proto & SAS_PROTOCOL_SMP) {
1265 /* SMP */
1266 rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev,
1267 pm8001_dev->sas_device, 0, tag);
1268
1269 }
1270 out:
1271 spin_lock_irqsave(&task->task_state_lock, flags);
1272 if (task->slow_task == &slow_task)
1273 task->slow_task = NULL;
1274 spin_unlock_irqrestore(&task->task_state_lock, flags);
1275 if (rc != TMF_RESP_FUNC_COMPLETE)
1276 pm8001_printk("rc= %d\n", rc);
1277 return rc;
1278 }
1279
int pm8001_abort_task_set(struct domain_device *dev, u8 *lun)
1281 {
1282 int rc = TMF_RESP_FUNC_FAILED;
1283 struct pm8001_tmf_task tmf_task;
1284
1285 tmf_task.tmf = TMF_ABORT_TASK_SET;
1286 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1287 return rc;
1288 }
1289
int pm8001_clear_aca(struct domain_device *dev, u8 *lun)
1291 {
1292 int rc = TMF_RESP_FUNC_FAILED;
1293 struct pm8001_tmf_task tmf_task;
1294
1295 tmf_task.tmf = TMF_CLEAR_ACA;
1296 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1297
1298 return rc;
1299 }
1300
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
1302 {
1303 int rc = TMF_RESP_FUNC_FAILED;
1304 struct pm8001_tmf_task tmf_task;
1305 struct pm8001_device *pm8001_dev = dev->lldd_dev;
1306 struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
1307
1308 PM8001_EH_DBG(pm8001_ha,
1309 pm8001_printk("I_T_L_Q clear task set[%x]\n",
1310 pm8001_dev->device_id));
1311 tmf_task.tmf = TMF_CLEAR_TASK_SET;
1312 rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
1313 return rc;
1314 }
1315
1316