/*
 * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver
 *
 * Copyright (c) 2008-2009 USI Co., Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm80xx_tracepoints.h"

/**
 * pm8001_find_tag - find the tag that belongs to a given sas task
 * @task: the task sent to the LLDD
 * @tag: the found tag associated with the task
 */
static int pm8001_find_tag(struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct pm8001_ccb_info *ccb;
		ccb = task->lldd_task;
		*tag = ccb->ccb_tag;
		return 1;
	}
	return 0;
}

/**
 * pm8001_tag_free - free a tag that is no longer needed
 * @pm8001_ha: our hba struct
 * @tag: the tag to return to the tag bitmap
 */
void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag)
{
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	__clear_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
}

/**
 * pm8001_tag_alloc - allocate an empty tag for a task to use
 * @pm8001_ha: our hba struct
 * @tag_out: the allocated tag
 */
int pm8001_tag_alloc(struct pm8001_hba_info *pm8001_ha, u32 *tag_out)
{
	void *bitmap = pm8001_ha->tags;
	unsigned long flags;
	unsigned int tag;

	spin_lock_irqsave(&pm8001_ha->bitmap_lock, flags);
	tag = find_first_zero_bit(bitmap, pm8001_ha->tags_num);
	if (tag >= pm8001_ha->tags_num) {
		spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
		return -SAS_QUEUE_FULL;
	}
	__set_bit(tag, bitmap);
	spin_unlock_irqrestore(&pm8001_ha->bitmap_lock, flags);
	*tag_out = tag;
	return 0;
}

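/**
 * pm8001_tag_init - mark every tag in the tag bitmap as free
 * @pm8001_ha: our hba struct
 */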
void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha)
{
	int i;
	for (i = 0; i < pm8001_ha->tags_num; ++i)
		pm8001_tag_free(pm8001_ha, i);
}

/**
 * pm8001_mem_alloc - allocate DMA-coherent memory for pm8001.
 * @pdev: pci device.
 * @virt_addr: the allocated virtual address
 * @pphys_addr: DMA address for this device
 * @pphys_addr_hi: upper 32 bits of the aligned DMA address
 * @pphys_addr_lo: lower 32 bits of the aligned DMA address
 * @mem_size: memory size.
 * @align: requested byte alignment
 */
int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
	dma_addr_t *pphys_addr, u32 *pphys_addr_hi,
	u32 *pphys_addr_lo, u32 mem_size, u32 align)
{
	caddr_t mem_virt_alloc;
	dma_addr_t mem_dma_handle;
	u64 phys_align;
	u64 align_offset = 0;
	if (align)
		align_offset = (dma_addr_t)align - 1;
	mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align,
				&mem_dma_handle, GFP_KERNEL);
	if (!mem_virt_alloc)
		return -ENOMEM;
	*pphys_addr = mem_dma_handle;
	phys_align = (*pphys_addr + align_offset) & ~align_offset;
	*virt_addr = (void *)mem_virt_alloc + phys_align - *pphys_addr;
	*pphys_addr_hi = upper_32_bits(phys_align);
	*pphys_addr_lo = lower_32_bits(phys_align);
	return 0;
}

/**
 * pm8001_find_ha_by_dev - find our hba struct from the domain device
 * that the sas layer passed down.
 * @dev: the domain device from the sas layer.
 */
static
struct pm8001_hba_info *pm8001_find_ha_by_dev(struct domain_device *dev)
{
	struct sas_ha_struct *sha = dev->port->ha;
	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
	return pm8001_ha;
}

/**
 * pm8001_phy_control - phy control handler registered in the
 * sas_domain_function_template for libsas to use. Note: this only controls
 * HBA phys, not expander phys; to control an expander phy, use an SMP
 * command instead.
 * @sas_phy: which phy in the HBA phys.
 * @func: the operation.
 * @funcdata: operation argument (the requested link rates for
 * PHY_FUNC_SET_LINK_RATE); unused otherwise.
 */
int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
	void *funcdata)
{
	int rc = 0, phy_id = sas_phy->id;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct sas_phy_linkrates *rates;
	struct pm8001_phy *phy;
	DECLARE_COMPLETION_ONSTACK(completion);
	unsigned long flags;
	pm8001_ha = sas_phy->ha->lldd_ha;
	phy = &pm8001_ha->phy[phy_id];
	pm8001_ha->phy[phy_id].enable_completion = &completion;
	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:
		rates = funcdata;
		if (rates->minimum_linkrate) {
			pm8001_ha->phy[phy_id].minimum_linkrate =
				rates->minimum_linkrate;
		}
		if (rates->maximum_linkrate) {
			pm8001_ha->phy[phy_id].maximum_linkrate =
				rates->maximum_linkrate;
		}
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					PHY_LINK_RESET);
		break;
	case PHY_FUNC_HARD_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					PHY_HARD_RESET);
		break;
	case PHY_FUNC_LINK_RESET:
		if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
			PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
			wait_for_completion(&completion);
		}
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					PHY_LINK_RESET);
		break;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
		PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
					PHY_LINK_RESET);
		break;
	case PHY_FUNC_DISABLE:
		if (pm8001_ha->chip_id != chip_8001) {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPCV) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		} else {
			if (pm8001_ha->phy[phy_id].phy_state ==
				PHY_STATE_LINK_UP_SPC) {
				sas_phy_disconnected(&phy->sas_phy);
				sas_notify_phy_event(&phy->sas_phy,
					PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
				phy->phy_attached = 0;
			}
		}
		PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
		break;
	case PHY_FUNC_GET_EVENTS:
		spin_lock_irqsave(&pm8001_ha->lock, flags);
		if (-1 == pm8001_bar4_shift(pm8001_ha,
					(phy_id < 4) ? 0x30000 : 0x40000)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -EINVAL;
		}
		{
			struct sas_phy *phy = sas_phy->phy;
			u32 __iomem *qp = pm8001_ha->io_mem[2].memvirtaddr
				+ 0x1034 + (0x4000 * (phy_id & 3));

			phy->invalid_dword_count = readl(qp);
			phy->running_disparity_error_count = readl(&qp[1]);
			phy->loss_of_dword_sync_count = readl(&qp[3]);
			phy->phy_reset_problem_count = readl(&qp[4]);
		}
		if (pm8001_ha->chip_id == chip_8001)
			pm8001_bar4_shift(pm8001_ha, 0);
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		return 0;
	default:
		pm8001_dbg(pm8001_ha, DEVIO, "func 0x%x\n", func);
		rc = -EOPNOTSUPP;
	}
	msleep(300);
	return rc;
}

/**
 * pm8001_scan_start - enable all HBA phys by sending phy_start commands
 * to the HBA.
 * @shost: the scsi host data.
 */
void pm8001_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	DECLARE_COMPLETION_ONSTACK(completion);
	pm8001_ha = sha->lldd_ha;
	/* SAS_RE_INITIALIZATION not available in SPCv/ve */
	if (pm8001_ha->chip_id == chip_8001)
		PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
	for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
		pm8001_ha->phy[i].enable_completion = &completion;
		PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
		wait_for_completion(&completion);
		msleep(300);
	}
}

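/**
 * pm8001_scan_finished - tell the midlayer whether the asynchronous scan
 * is done
 * @shost: the scsi host data.
 * @time: elapsed scan time, in jiffies.
 *
 * Return 0 while the phys are still coming up, then drain the discovery
 * work and return 1.
 */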
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);

	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	sas_drain_work(ha);
	return 1;
}

/**
 * pm8001_task_prep_smp - the dispatcher function, prepare data for an smp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the smp task
 */
static int pm8001_task_prep_smp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->smp_req(pm8001_ha, ccb);
}

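/**
 * pm8001_get_ncq_tag - get the NCQ tag of a queued ATA command, if any
 * @task: the task sent to the LLDD
 * @tag: where the NCQ tag is stored
 *
 * Return 1 and fill @tag when the queued command uses an NCQ protocol,
 * otherwise return 0.
 */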
u32 pm8001_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc && ata_is_ncq(qc->tf.protocol)) {
		*tag = qc->tag;
		return 1;
	}

	return 0;
}

/**
 * pm8001_task_prep_ata - the dispatcher function, prepare data for a sata task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the sata task
 */
static int pm8001_task_prep_ata(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->sata_req(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_internal_abort - the dispatcher function, prepare data
 * for an internal abort task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the internal abort task
 */
static int pm8001_task_prep_internal_abort(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->task_abort(pm8001_ha, ccb);
}

/**
 * pm8001_task_prep_ssp_tm - the dispatcher function, prepare task management data
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the TM
 * @tmf: the task management IU
 */
static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb, struct sas_tmf_task *tmf)
{
	return PM8001_CHIP_DISP->ssp_tm_req(pm8001_ha, ccb, tmf);
}

/**
 * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for an ssp task
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the ssp task
 */
static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb);
}

/* Find the local port id that's attached to this device */
static int sas_find_local_port_id(struct domain_device *dev)
{
	struct domain_device *pdev = dev->parent;

	/* Directly attached device */
	if (!pdev)
		return dev->port->id;
	while (pdev) {
		struct domain_device *pdev_p = pdev->parent;
		if (!pdev_p)
			return pdev->port->id;
		pdev = pdev->parent;
	}
	return 0;
}

#define DEV_IS_GONE(pm8001_dev)	\
	((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)))


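/**
 * pm8001_deliver_command - dispatch a ccb to the protocol-specific prep
 * function and hand it to the chip layer
 * @pm8001_ha: our hba card information
 * @ccb: the ccb to deliver
 */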
static int pm8001_deliver_command(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	enum sas_protocol task_proto = task->task_proto;
	struct sas_tmf_task *tmf = task->tmf;
	int is_tmf = !!tmf;

	switch (task_proto) {
	case SAS_PROTOCOL_SMP:
		return pm8001_task_prep_smp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SSP:
		if (is_tmf)
			return pm8001_task_prep_ssp_tm(pm8001_ha, ccb, tmf);
		return pm8001_task_prep_ssp(pm8001_ha, ccb);
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
		return pm8001_task_prep_ata(pm8001_ha, ccb);
	case SAS_PROTOCOL_INTERNAL_ABORT:
		return pm8001_task_prep_internal_abort(pm8001_ha, ccb);
	default:
		dev_err(pm8001_ha->dev, "unknown sas_task proto: 0x%x\n",
			task_proto);
	}

	return -EINVAL;
}

/**
 * pm8001_queue_command - entry point registered for the upper layer; all IO
 * commands sent to the HBA come through this interface.
 * @task: the task to be executed.
 * @gfp_flags: gfp_flags
 */
int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	struct task_status_struct *ts = &task->task_status;
	enum sas_protocol task_proto = task->task_proto;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	bool internal_abort = sas_is_internal_abort(task);
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_port *port = NULL;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 n_elem = 0;
	int rc = 0;

	if (!internal_abort && !dev->port) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (dev->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return 0;
	}

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	if (pm8001_ha->controller_fatal_error) {
		ts->resp = SAS_TASK_UNDELIVERED;
		task->task_done(task);
		return 0;
	}

	pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec device\n");

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_dev = dev->lldd_dev;
	port = &pm8001_ha->port[sas_find_local_port_id(dev)];

	if (!internal_abort &&
	    (DEV_IS_GONE(pm8001_dev) || !port->port_attached)) {
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		if (sas_protocol_ata(task_proto)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		} else {
			task->task_done(task);
		}
		rc = -ENODEV;
		goto err_out;
	}

	ccb = pm8001_ccb_alloc(pm8001_ha, pm8001_dev, task);
	if (!ccb) {
		rc = -SAS_QUEUE_FULL;
		goto err_out;
	}

	if (!sas_protocol_ata(task_proto)) {
		if (task->num_scatter) {
			n_elem = dma_map_sg(pm8001_ha->dev, task->scatter,
					task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto err_out_ccb;
			}
		}
	} else {
		n_elem = task->num_scatter;
	}

	task->lldd_task = ccb;
	ccb->n_elem = n_elem;

	atomic_inc(&pm8001_dev->running_req);

	rc = pm8001_deliver_command(pm8001_ha, ccb);
	if (rc) {
		atomic_dec(&pm8001_dev->running_req);
		if (!sas_protocol_ata(task_proto) && n_elem)
			dma_unmap_sg(pm8001_ha->dev, task->scatter,
				task->num_scatter, task->data_dir);
err_out_ccb:
		pm8001_ccb_free(pm8001_ha, ccb);

err_out:
		pm8001_dbg(pm8001_ha, IO, "pm8001_task_exec failed[%d]!\n", rc);
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	return rc;
}

/**
 * pm8001_ccb_task_free - free the sg list for an ssp or smp command and
 * free the ccb.
 * @pm8001_ha: our hba card information
 * @ccb: the ccb attached to the task to free
 */
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	struct sas_task *task = ccb->task;
	struct ata_queued_cmd *qc;
	struct pm8001_device *pm8001_dev;

	if (!task)
		return;

	if (!sas_protocol_ata(task->task_proto) && ccb->n_elem)
		dma_unmap_sg(pm8001_ha->dev, task->scatter,
			task->num_scatter, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
			DMA_FROM_DEVICE);
		dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
			DMA_TO_DEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	if (sas_protocol_ata(task->task_proto)) {
		/* For SCSI/ATA commands uldd_task points to ata_queued_cmd */
		qc = task->uldd_task;
		pm8001_dev = ccb->device;
		trace_pm80xx_request_complete(pm8001_ha->id,
			pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
			ccb->ccb_tag, 0 /* ctlr_opcode not known */,
			qc ? qc->tf.command : 0, // ata opcode
			pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
	}

	task->lldd_task = NULL;
	pm8001_ccb_free(pm8001_ha, ccb);
}

/**
 * pm8001_alloc_dev - find an unused pm8001_device
 * @pm8001_ha: our hba card information
 */
static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) {
			pm8001_ha->devices[dev].id = dev;
			return &pm8001_ha->devices[dev];
		}
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL,
			"max support %d devices, ignore ..\n",
			PM8001_MAX_DEVICES);
	}
	return NULL;
}
/**
 * pm8001_find_dev - find a matching pm8001_device
 * @pm8001_ha: our hba card information
 * @device_id: device ID to match against
 */
struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha,
	u32 device_id)
{
	u32 dev;
	for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) {
		if (pm8001_ha->devices[dev].device_id == device_id)
			return &pm8001_ha->devices[dev];
	}
	if (dev == PM8001_MAX_DEVICES) {
		pm8001_dbg(pm8001_ha, FAIL, "NO MATCHING DEVICE FOUND !!!\n");
	}
	return NULL;
}

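/**
 * pm8001_free_dev - return a pm8001_device to the unused pool
 * @pm8001_dev: the device to be released
 */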
void pm8001_free_dev(struct pm8001_device *pm8001_dev)
{
	u32 id = pm8001_dev->id;
	memset(pm8001_dev, 0, sizeof(*pm8001_dev));
	pm8001_dev->id = id;
	pm8001_dev->dev_type = SAS_PHY_UNUSED;
	pm8001_dev->device_id = PM8001_MAX_DEVICES;
	pm8001_dev->sas_device = NULL;
}

/**
 * pm8001_dev_found_notify - libsas notification that a device was found.
 * @dev: the device structure which sas layer used.
 *
 * When libsas finds a sas domain device, it tells the LLDD that the device
 * was found, and the LLDD then registers the device with the HBA firmware
 * using the "OPC_INB_REG_DEV" command. The HBA assigns a device ID
 * (according to the device's sas address) and returns it to the LLDD. From
 * then on, we communicate with the HBA FW using the device ID that the HBA
 * assigned rather than the sas address. This step is necessary for our HBA
 * but optional for other HBA drivers.
 */
static int pm8001_dev_found_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	int res = 0;
	struct pm8001_hba_info *pm8001_ha = NULL;
	struct domain_device *parent_dev = dev->parent;
	struct pm8001_device *pm8001_device;
	DECLARE_COMPLETION_ONSTACK(completion);
	u32 flag = 0;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);

	pm8001_device = pm8001_alloc_dev(pm8001_ha);
	if (!pm8001_device) {
		res = -1;
		goto found_out;
	}
	pm8001_device->sas_device = dev;
	dev->lldd_dev = pm8001_device;
	pm8001_device->dev_type = dev->dev_type;
	pm8001_device->dcompletion = &completion;
	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_id;
		struct ex_phy *phy;
		for (phy_id = 0; phy_id < parent_dev->ex_dev.num_phys;
		phy_id++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_id];
			if (SAS_ADDR(phy->attached_sas_addr)
				== SAS_ADDR(dev->sas_addr)) {
				pm8001_device->attached_phy = phy_id;
				break;
			}
		}
		if (phy_id == parent_dev->ex_dev.num_phys) {
			pm8001_dbg(pm8001_ha, FAIL,
				"Error: no attached dev:%016llx at ex:%016llx.\n",
				SAS_ADDR(dev->sas_addr),
				SAS_ADDR(parent_dev->sas_addr));
			res = -1;
		}
	} else {
		if (dev->dev_type == SAS_SATA_DEV) {
			pm8001_device->attached_phy =
				dev->rphy->identify.phy_identifier;
			flag = 1; /* directly sata */
		}
	} /*register this device to HBA*/
	pm8001_dbg(pm8001_ha, DISC, "Found device\n");
	PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	wait_for_completion(&completion);
	if (dev->dev_type == SAS_END_DEVICE)
		msleep(50);
	pm8001_ha->flags = PM8001F_RUN_TIME;
	return 0;
found_out:
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return res;
}

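/**
 * pm8001_dev_found - libsas entry point for a newly found domain device
 * @dev: the device structure which sas layer used.
 */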
int pm8001_dev_found(struct domain_device *dev)
{
	return pm8001_dev_found_notify(dev);
}

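/**
 * pm8001_task_done - task completion callback: stop the slow task timer
 * and signal the slow task completion
 * @task: the completed task
 */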
void pm8001_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

#define PM8001_TASK_TIMEOUT 20

/**
 * pm8001_dev_gone_notify - see the comments for "pm8001_dev_found_notify"
 * @dev: the device structure which sas layer used.
 */
static void pm8001_dev_gone_notify(struct domain_device *dev)
{
	unsigned long flags = 0;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;

	pm8001_ha = pm8001_find_ha_by_dev(dev);
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (pm8001_dev) {
		u32 device_id = pm8001_dev->device_id;

		pm8001_dbg(pm8001_ha, DISC, "found dev[%d:%x] is gone.\n",
			pm8001_dev->device_id, pm8001_dev->dev_type);
		if (atomic_read(&pm8001_dev->running_req)) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			sas_execute_internal_abort_dev(dev, 0, NULL);
			while (atomic_read(&pm8001_dev->running_req))
				msleep(20);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
		pm8001_free_dev(pm8001_dev);
	} else {
		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
	}
	dev->lldd_dev = NULL;
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

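/**
 * pm8001_dev_gone - libsas entry point for a removed domain device
 * @dev: the device structure which sas layer used.
 */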
void pm8001_dev_gone(struct domain_device *dev)
{
	pm8001_dev_gone_notify(dev);
}

/* retry commands by ha, by task and/or by device */
void pm8001_open_reject_retry(
	struct pm8001_hba_info *pm8001_ha,
	struct sas_task *task_to_close,
	struct pm8001_device *device_to_close)
{
	int i;
	unsigned long flags;

	if (pm8001_ha == NULL)
		return;

	spin_lock_irqsave(&pm8001_ha->lock, flags);

	for (i = 0; i < PM8001_MAX_CCB; i++) {
		struct sas_task *task;
		struct task_status_struct *ts;
		struct pm8001_device *pm8001_dev;
		unsigned long flags1;
		struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i];

		if (ccb->ccb_tag == PM8001_INVALID_TAG)
			continue;

		pm8001_dev = ccb->device;
		if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))
			continue;
		if (!device_to_close) {
			uintptr_t d = (uintptr_t)pm8001_dev
					- (uintptr_t)&pm8001_ha->devices;
			if (((d % sizeof(*pm8001_dev)) != 0)
			    || ((d / sizeof(*pm8001_dev)) >= PM8001_MAX_DEVICES))
				continue;
		} else if (pm8001_dev != device_to_close)
			continue;
		task = ccb->task;
		if (!task || !task->task_done)
			continue;
		if (task_to_close && (task != task_to_close))
			continue;
		ts = &task->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		if (pm8001_dev)
			atomic_dec(&pm8001_dev->running_req);
		spin_lock_irqsave(&task->task_state_lock, flags1);
		task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((task->task_state_flags
				& SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
		} else {
			spin_unlock_irqrestore(&task->task_state_lock,
				flags1);
			pm8001_ccb_task_free(pm8001_ha, ccb);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			task->task_done(task);
			spin_lock_irqsave(&pm8001_ha->lock, flags);
		}
	}

	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
}

/**
 * pm8001_I_T_nexus_reset() - reset the initiator/target connection
 * @dev: the device structure for the device to reset.
 *
 * Standard mandates link reset for ATA (type 0) and hard reset for
 * SSP (type 1), only for RECOVERY
 */
int pm8001_I_T_nexus_reset(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -ENODEV;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		rc = sas_phy_reset(phy, 1);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH,
				"phy reset failed for device %x\n"
				"with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
			goto out;
		}
		msleep(2000);
		rc = sas_execute_internal_abort_dev(dev, 0, NULL);
		if (rc) {
			pm8001_dbg(pm8001_ha, EH, "task abort failed %x\n"
				"with rc %d\n", pm8001_dev->device_id, rc);
			rc = TMF_RESP_FUNC_FAILED;
		}
	} else {
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);
	return rc;
}

/*
 * This function handles the IT_NEXUS_XXX event or completion
 * status code for SSP/SATA/SMP I/O requests.
 */
int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev;
	struct pm8001_hba_info *pm8001_ha;
	struct sas_phy *phy;

	if (!dev || !dev->lldd_dev)
		return -1;

	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_Nexus handler invoked !!\n");

	phy = sas_get_local_phy(dev);

	if (dev_is_sata(dev)) {
		DECLARE_COMPLETION_ONSTACK(completion_setstate);
		if (scsi_is_sas_phy_local(phy)) {
			rc = 0;
			goto out;
		}
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
		pm8001_dev->setds_completion = &completion_setstate;

		wait_for_completion(&completion_setstate);
	} else {
		/* send internal ssp/sata/smp abort command to FW */
		sas_execute_internal_abort_dev(dev, 0, NULL);
		msleep(100);

		/* deregister the target device */
		pm8001_dev_gone_notify(dev);
		msleep(200);

		/*send phy reset to hard reset target */
		rc = sas_phy_reset(phy, 1);
		msleep(2000);
	}
	pm8001_dbg(pm8001_ha, EH, " for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc);
out:
	sas_put_local_phy(phy);

	return rc;
}
/* mandatory SAM-3, reset the specified LUN */
int pm8001_lu_reset(struct domain_device *dev, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	DECLARE_COMPLETION_ONSTACK(completion_setstate);
	if (dev_is_sata(dev)) {
		struct sas_phy *phy = sas_get_local_phy(dev);
		sas_execute_internal_abort_dev(dev, 0, NULL);
		rc = sas_phy_reset(phy, 1);
		sas_put_local_phy(phy);
		pm8001_dev->setds_completion = &completion_setstate;
		rc = PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	} else {
		rc = sas_lu_reset(dev, lun);
	}
	/* If failed, fall-through I_T_Nexus reset */
	pm8001_dbg(pm8001_ha, EH, "for device[%x]:rc=%d\n",
		pm8001_dev->device_id, rc);
	return rc;
}

/* optional SAM-3 */
int pm8001_query_task(struct sas_task *task)
{
	u32 tag = 0xdeadbeef;
	int rc = TMF_RESP_FUNC_FAILED;
	if (unlikely(!task || !task->lldd_task || !task->dev))
		return rc;

	if (task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *dev = task->dev;
		struct pm8001_hba_info *pm8001_ha =
			pm8001_find_ha_by_dev(dev);

		rc = pm8001_find_tag(task, &tag);
		if (rc == 0) {
			rc = TMF_RESP_FUNC_FAILED;
			return rc;
		}
		pm8001_dbg(pm8001_ha, EH, "Query:[%16ph]\n", cmnd->cmnd);

		rc = sas_query_task(task, tag);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
			pm8001_dbg(pm8001_ha, EH,
				"The task is still in Lun\n");
			break;
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			pm8001_dbg(pm8001_ha, EH,
				"The task is not in Lun or failed, reset the phy\n");
			break;
		}
	}
	pr_err("pm80xx: rc= %d\n", rc);
	return rc;
}

/* mandatory SAM-3; the task/ccb info still needs to be freed. Abort the specified task. */
int pm8001_abort_task(struct sas_task *task)
{
	unsigned long flags;
	u32 tag;
	struct domain_device *dev;
	struct pm8001_hba_info *pm8001_ha;
	struct pm8001_device *pm8001_dev;
	int rc = TMF_RESP_FUNC_FAILED, ret;
	u32 phy_id, port_id;
	struct sas_task_slow slow_task;

	if (unlikely(!task || !task->lldd_task || !task->dev))
		return TMF_RESP_FUNC_FAILED;

	dev = task->dev;
	pm8001_dev = dev->lldd_dev;
	pm8001_ha = pm8001_find_ha_by_dev(dev);
	phy_id = pm8001_dev->attached_phy;

	if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
		// If the controller is seeing fatal errors
		// abort task will not get a response from the controller
		return TMF_RESP_FUNC_FAILED;
	}

	ret = pm8001_find_tag(task, &tag);
	if (ret == 0) {
		pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
		return TMF_RESP_FUNC_FAILED;
	}
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		return TMF_RESP_FUNC_COMPLETE;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	if (task->slow_task == NULL) {
		init_completion(&slow_task.completion);
		task->slow_task = &slow_task;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (task->task_proto & SAS_PROTOCOL_SSP) {
		rc = sas_abort_task(task, tag);
		sas_execute_internal_abort_single(dev, tag, 0, NULL);
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (pm8001_ha->chip_id == chip_8006) {
			DECLARE_COMPLETION_ONSTACK(completion_reset);
			DECLARE_COMPLETION_ONSTACK(completion);
			struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
			port_id = phy->port->port_id;

			/* 1. Set Device state as Recovery */
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_IN_RECOVERY);
			wait_for_completion(&completion);

			/* 2. Send Phy Control Hard Reset */
			reinit_completion(&completion);
			phy->port_reset_status = PORT_RESET_TMO;
			phy->reset_success = false;
			phy->enable_completion = &completion;
			phy->reset_completion = &completion_reset;
			ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
				PHY_HARD_RESET);
			if (ret) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
				goto out;
			}

			/* In the case of the reset timeout/fail we still
			 * abort the command at the firmware. The assumption
			 * here is that the drive is off doing something so
			 * that it's not processing requests, and we want to
			 * avoid getting a completion for this and either
			 * leaking the task in libsas or losing the race and
			 * getting a double free.
			 */
			pm8001_dbg(pm8001_ha, MSG,
				"Waiting for local phy ctl\n");
			ret = wait_for_completion_timeout(&completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret || !phy->reset_success) {
				phy->enable_completion = NULL;
				phy->reset_completion = NULL;
			} else {
				/* 3. Wait for Port Reset complete or
				 * Port reset TMO
				 */
				pm8001_dbg(pm8001_ha, MSG,
					"Waiting for Port reset\n");
				ret = wait_for_completion_timeout(
					&completion_reset,
					PM8001_TASK_TIMEOUT * HZ);
				if (!ret)
					phy->reset_completion = NULL;
				WARN_ON(phy->port_reset_status ==
					PORT_RESET_TMO);
				if (phy->port_reset_status == PORT_RESET_TMO) {
					pm8001_dev_gone_notify(dev);
					PM8001_CHIP_DISP->hw_event_ack_req(
						pm8001_ha, 0,
						0x07, /*HW_EVENT_PHY_DOWN ack*/
						port_id, phy_id, 0, 0);
					goto out;
				}
			}

			/*
			 * 4. SATA Abort ALL
			 * we wait for the task to be aborted so that the task
			 * is removed from the ccb. on success the caller is
			 * going to free the task.
			 */
			ret = sas_execute_internal_abort_dev(dev, 0, NULL);
			if (ret)
				goto out;
			ret = wait_for_completion_timeout(
				&task->slow_task->completion,
				PM8001_TASK_TIMEOUT * HZ);
			if (!ret)
				goto out;

			/* 5. Set Device State as Operational */
			reinit_completion(&completion);
			pm8001_dev->setds_completion = &completion;
			PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
				pm8001_dev, DS_OPERATIONAL);
			wait_for_completion(&completion);
		} else {
			ret = sas_execute_internal_abort_single(dev, tag, 0, NULL);
		}
		rc = TMF_RESP_FUNC_COMPLETE;
	} else if (task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		rc = sas_execute_internal_abort_single(dev, tag, 0, NULL);

	}
out:
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->slow_task == &slow_task)
		task->slow_task = NULL;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	if (rc != TMF_RESP_FUNC_COMPLETE)
		pm8001_info(pm8001_ha, "rc= %d\n", rc);
	return rc;
}

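/**
 * pm8001_clear_task_set - issue a CLEAR TASK SET TMF for the given LUN
 * @dev: the device the task set belongs to
 * @lun: the LUN to clear
 */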
int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
{
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);

	pm8001_dbg(pm8001_ha, EH, "I_T_L_Q clear task set[%x]\n",
		pm8001_dev->device_id);
	return sas_clear_task_set(dev, lun);
}

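/**
 * pm8001_port_formed - libsas notification that a port has been formed
 * @sas_phy: the phy the port was formed on
 *
 * Store our pm8001_port in the asd_sas_port so later requests can find it.
 */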
void pm8001_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct pm8001_hba_info *pm8001_ha = sas_ha->lldd_ha;
	struct pm8001_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct pm8001_port *port = phy->port;

	if (!sas_port) {
		pm8001_dbg(pm8001_ha, FAIL, "Received null port\n");
		return;
	}
	sas_port->lldd_port = port;
}

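/**
 * pm8001_setds_completion - set the device state back to DS_OPERATIONAL
 * @dev: the device structure which sas layer used.
 *
 * Skipped on the 8001 controller; on the other controllers the request is
 * issued to the firmware and we wait for it to complete.
 */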
void pm8001_setds_completion(struct domain_device *dev)
{
	struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	DECLARE_COMPLETION_ONSTACK(completion_setstate);

	if (pm8001_ha->chip_id != chip_8001) {
		pm8001_dev->setds_completion = &completion_setstate;
		PM8001_CHIP_DISP->set_dev_state_req(pm8001_ha,
			pm8001_dev, DS_OPERATIONAL);
		wait_for_completion(&completion_setstate);
	}
}

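/**
 * pm8001_tmf_aborted - callback for an aborted TMF
 * @task: the aborted task
 *
 * Detach the task from its ccb (ccb->task = NULL).
 */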
void pm8001_tmf_aborted(struct sas_task *task)
{
	struct pm8001_ccb_info *ccb = task->lldd_task;

	if (ccb)
		ccb->task = NULL;
}