Lines matching "phy" and "device" (full-word match) in the hisi_sas driver
1 // SPDX-License-Identifier: GPL-2.0-or-later
11 ((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
13 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
17 struct domain_device *device);
18 static void hisi_sas_dev_gone(struct domain_device *device);
26 switch (fis->command) { in hisi_sas_get_ata_protocol()
79 switch (fis->features) { in hisi_sas_get_ata_protocol()
105 struct task_status_struct *ts = &task->task_status; in hisi_sas_sata_done()
106 struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf; in hisi_sas_sata_done()
109 u8 *iu = &status_buf->iu[0]; in hisi_sas_sata_done()
112 resp->frame_len = sizeof(struct dev_to_host_fis); in hisi_sas_sata_done()
113 memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis)); in hisi_sas_sata_done()
115 ts->buf_valid_size = sizeof(*resp); in hisi_sas_sata_done()
128 max -= SAS_LINK_RATE_1_5_GBPS; in hisi_sas_get_prog_phy_linkrate_mask()
135 static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device) in dev_to_hisi_hba() argument
137 return device->port->ha->lldd_ha; in dev_to_hisi_hba()
150 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) in hisi_sas_stop_phys()
157 void *bitmap = hisi_hba->slot_index_tags; in hisi_sas_slot_index_clear()
164 if (hisi_hba->hw->slot_index_alloc || in hisi_sas_slot_index_free()
166 spin_lock(&hisi_hba->lock); in hisi_sas_slot_index_free()
168 spin_unlock(&hisi_hba->lock); in hisi_sas_slot_index_free()
174 void *bitmap = hisi_hba->slot_index_tags; in hisi_sas_slot_index_set()
183 void *bitmap = hisi_hba->slot_index_tags; in hisi_sas_slot_index_alloc()
186 return scsi_cmd_to_rq(scsi_cmnd)->tag; in hisi_sas_slot_index_alloc()
188 spin_lock(&hisi_hba->lock); in hisi_sas_slot_index_alloc()
189 index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, in hisi_sas_slot_index_alloc()
190 hisi_hba->last_slot_index + 1); in hisi_sas_slot_index_alloc()
191 if (index >= hisi_hba->slot_index_count) { in hisi_sas_slot_index_alloc()
193 hisi_hba->slot_index_count, in hisi_sas_slot_index_alloc()
195 if (index >= hisi_hba->slot_index_count) { in hisi_sas_slot_index_alloc()
196 spin_unlock(&hisi_hba->lock); in hisi_sas_slot_index_alloc()
197 return -SAS_QUEUE_FULL; in hisi_sas_slot_index_alloc()
201 hisi_hba->last_slot_index = index; in hisi_sas_slot_index_alloc()
202 spin_unlock(&hisi_hba->lock); in hisi_sas_slot_index_alloc()
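/*
 * Tag (IPTT) allocation, as seen above: commands that carry a scsi_cmnd
 * reuse the block-layer request tag directly; anything else takes a free
 * bit from slot_index_tags under hisi_hba->lock, returning
 * -SAS_QUEUE_FULL once the bitmap is exhausted.
 */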
210 int device_id = slot->device_id; in hisi_sas_slot_task_free()
211 struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; in hisi_sas_slot_task_free()
214 struct device *dev = hisi_hba->dev; in hisi_sas_slot_task_free()
216 if (!task->lldd_task) in hisi_sas_slot_task_free()
219 task->lldd_task = NULL; in hisi_sas_slot_task_free()
221 if (!sas_protocol_ata(task->task_proto)) { in hisi_sas_slot_task_free()
222 if (slot->n_elem) { in hisi_sas_slot_task_free()
223 if (task->task_proto & SAS_PROTOCOL_SSP) in hisi_sas_slot_task_free()
224 dma_unmap_sg(dev, task->scatter, in hisi_sas_slot_task_free()
225 task->num_scatter, in hisi_sas_slot_task_free()
226 task->data_dir); in hisi_sas_slot_task_free()
228 dma_unmap_sg(dev, &task->smp_task.smp_req, in hisi_sas_slot_task_free()
231 if (slot->n_elem_dif) { in hisi_sas_slot_task_free()
232 struct sas_ssp_task *ssp_task = &task->ssp_task; in hisi_sas_slot_task_free()
233 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; in hisi_sas_slot_task_free()
237 task->data_dir); in hisi_sas_slot_task_free()
242 spin_lock(&sas_dev->lock); in hisi_sas_slot_task_free()
243 list_del_init(&slot->entry); in hisi_sas_slot_task_free()
244 spin_unlock(&sas_dev->lock); in hisi_sas_slot_task_free()
248 hisi_sas_slot_index_free(hisi_hba, slot->idx); in hisi_sas_slot_task_free()
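/*
 * Slot teardown: for non-ATA tasks the data scatterlist (SSP) or SMP
 * request is DMA-unmapped, any DIF scatterlist is unmapped as well, the
 * slot is unlinked from the device's list under sas_dev->lock, and its
 * index is handed back to the tag bitmap.
 */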
255 hisi_hba->hw->prep_smp(hisi_hba, slot); in hisi_sas_task_prep_smp()
261 hisi_hba->hw->prep_ssp(hisi_hba, slot); in hisi_sas_task_prep_ssp()
267 hisi_hba->hw->prep_stp(hisi_hba, slot); in hisi_sas_task_prep_ata()
273 hisi_hba->hw->prep_abort(hisi_hba, slot); in hisi_sas_task_prep_abort()
279 struct device *dev = hisi_hba->dev; in hisi_sas_dma_unmap()
281 if (!sas_protocol_ata(task->task_proto) && n_elem) { in hisi_sas_dma_unmap()
282 if (task->num_scatter) { in hisi_sas_dma_unmap()
283 dma_unmap_sg(dev, task->scatter, task->num_scatter, in hisi_sas_dma_unmap()
284 task->data_dir); in hisi_sas_dma_unmap()
285 } else if (task->task_proto & SAS_PROTOCOL_SMP) { in hisi_sas_dma_unmap()
286 dma_unmap_sg(dev, &task->smp_task.smp_req, in hisi_sas_dma_unmap()
295 struct device *dev = hisi_hba->dev; in hisi_sas_dma_map()
298 if (sas_protocol_ata(task->task_proto)) { in hisi_sas_dma_map()
299 *n_elem = task->num_scatter; in hisi_sas_dma_map()
303 if (task->num_scatter) { in hisi_sas_dma_map()
304 *n_elem = dma_map_sg(dev, task->scatter, in hisi_sas_dma_map()
305 task->num_scatter, task->data_dir); in hisi_sas_dma_map()
307 rc = -ENOMEM; in hisi_sas_dma_map()
310 } else if (task->task_proto & SAS_PROTOCOL_SMP) { in hisi_sas_dma_map()
311 *n_elem = dma_map_sg(dev, &task->smp_task.smp_req, in hisi_sas_dma_map()
314 rc = -ENOMEM; in hisi_sas_dma_map()
317 req_len = sg_dma_len(&task->smp_task.smp_req); in hisi_sas_dma_map()
319 rc = -EINVAL; in hisi_sas_dma_map()
328 rc = -EINVAL; in hisi_sas_dma_map()
343 struct device *dev = hisi_hba->dev; in hisi_sas_dif_dma_unmap()
346 struct sas_ssp_task *ssp_task = &task->ssp_task; in hisi_sas_dif_dma_unmap()
347 struct scsi_cmnd *scsi_cmnd = ssp_task->cmd; in hisi_sas_dif_dma_unmap()
351 task->data_dir); in hisi_sas_dif_dma_unmap()
358 struct device *dev = hisi_hba->dev; in hisi_sas_dif_dma_map()
363 if (task->num_scatter) { in hisi_sas_dif_dma_map()
364 ssp_task = &task->ssp_task; in hisi_sas_dif_dma_map()
365 scsi_cmnd = ssp_task->cmd; in hisi_sas_dif_dma_map()
371 task->data_dir); in hisi_sas_dif_dma_map()
374 return -ENOMEM; in hisi_sas_dif_dma_map()
379 rc = -EINVAL; in hisi_sas_dif_dma_map()
389 scsi_prot_sg_count(scsi_cmnd), task->data_dir); in hisi_sas_dif_dma_map()
401 struct sas_task *task = slot->task; in hisi_sas_task_deliver()
404 spin_lock(&dq->lock); in hisi_sas_task_deliver()
405 wr_q_index = dq->wr_point; in hisi_sas_task_deliver()
406 dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; in hisi_sas_task_deliver()
407 list_add_tail(&slot->delivery, &dq->list); in hisi_sas_task_deliver()
408 spin_unlock(&dq->lock); in hisi_sas_task_deliver()
409 spin_lock(&sas_dev->lock); in hisi_sas_task_deliver()
410 list_add_tail(&slot->entry, &sas_dev->list); in hisi_sas_task_deliver()
411 spin_unlock(&sas_dev->lock); in hisi_sas_task_deliver()
413 dlvry_queue = dq->id; in hisi_sas_task_deliver()
416 slot->device_id = sas_dev->device_id; in hisi_sas_task_deliver()
417 slot->dlvry_queue = dlvry_queue; in hisi_sas_task_deliver()
418 slot->dlvry_queue_slot = dlvry_queue_slot; in hisi_sas_task_deliver()
419 cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; in hisi_sas_task_deliver()
420 slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; in hisi_sas_task_deliver()
422 task->lldd_task = slot; in hisi_sas_task_deliver()
424 memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); in hisi_sas_task_deliver()
429 switch (task->task_proto) { in hisi_sas_task_deliver()
450 WRITE_ONCE(slot->ready, 1); in hisi_sas_task_deliver()
452 spin_lock(&dq->lock); in hisi_sas_task_deliver()
453 hisi_hba->hw->start_delivery(dq); in hisi_sas_task_deliver()
454 spin_unlock(&dq->lock); in hisi_sas_task_deliver()
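/*
 * Delivery: a write-pointer entry is taken from the chosen delivery queue
 * under dq->lock, the slot is linked to the queue and device lists, the
 * command header is zeroed and filled per protocol (SMP/SSP/STP/internal
 * abort), the slot is marked ready, and hw->start_delivery() kicks the
 * hardware.
 */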
460 struct domain_device *device = task->dev; in hisi_sas_queue_command() local
461 struct asd_sas_port *sas_port = device->port; in hisi_sas_queue_command()
462 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_queue_command()
469 struct device *dev; in hisi_sas_queue_command()
473 struct task_status_struct *ts = &task->task_status; in hisi_sas_queue_command()
475 ts->resp = SAS_TASK_UNDELIVERED; in hisi_sas_queue_command()
476 ts->stat = SAS_PHY_DOWN; in hisi_sas_queue_command()
478 * libsas will use dev->port, should in hisi_sas_queue_command()
481 if (device->dev_type != SAS_SATA_DEV && !internal_abort) in hisi_sas_queue_command()
482 task->task_done(task); in hisi_sas_queue_command()
483 return -ECOMM; in hisi_sas_queue_command()
486 hisi_hba = dev_to_hisi_hba(device); in hisi_sas_queue_command()
487 dev = hisi_hba->dev; in hisi_sas_queue_command()
489 switch (task->task_proto) { in hisi_sas_queue_command()
495 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { in hisi_sas_queue_command()
497 return -EINVAL; in hisi_sas_queue_command()
499 down(&hisi_hba->sem); in hisi_sas_queue_command()
500 up(&hisi_hba->sem); in hisi_sas_queue_command()
505 dev_info(dev, "task prep: device %d not ready\n", in hisi_sas_queue_command()
506 sas_dev->device_id); in hisi_sas_queue_command()
508 dev_info(dev, "task prep: device %016llx not ready\n", in hisi_sas_queue_command()
509 SAS_ADDR(device->sas_addr)); in hisi_sas_queue_command()
511 return -ECOMM; in hisi_sas_queue_command()
515 if (!port->port_attached) { in hisi_sas_queue_command()
516 dev_info(dev, "task prep: %s port%d not attach device\n", in hisi_sas_queue_command()
517 dev_is_sata(device) ? "SATA/STP" : "SAS", in hisi_sas_queue_command()
518 device->port->id); in hisi_sas_queue_command()
520 return -ECOMM; in hisi_sas_queue_command()
523 if (task->uldd_task) { in hisi_sas_queue_command()
526 if (dev_is_sata(device)) { in hisi_sas_queue_command()
527 qc = task->uldd_task; in hisi_sas_queue_command()
528 scmd = qc->scsicmd; in hisi_sas_queue_command()
530 scmd = task->uldd_task; in hisi_sas_queue_command()
540 dq = &hisi_hba->dq[dq_index]; in hisi_sas_queue_command()
542 struct Scsi_Host *shost = hisi_hba->shost; in hisi_sas_queue_command()
543 struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; in hisi_sas_queue_command()
544 int queue = qmap->mq_map[raw_smp_processor_id()]; in hisi_sas_queue_command()
546 dq = &hisi_hba->dq[queue]; in hisi_sas_queue_command()
550 if (!hisi_hba->hw->prep_abort) in hisi_sas_queue_command()
553 if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags)) in hisi_sas_queue_command()
554 return -EIO; in hisi_sas_queue_command()
556 hisi_hba = dev_to_hisi_hba(device); in hisi_sas_queue_command()
558 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) in hisi_sas_queue_command()
559 return -EINVAL; in hisi_sas_queue_command()
562 dq = &hisi_hba->dq[task->abort_task.qid]; in hisi_sas_queue_command()
565 dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n", in hisi_sas_queue_command()
566 task->task_proto); in hisi_sas_queue_command()
567 return -EINVAL; in hisi_sas_queue_command()
574 if (!sas_protocol_ata(task->task_proto)) { in hisi_sas_queue_command()
580 if (!internal_abort && hisi_hba->hw->slot_index_alloc) in hisi_sas_queue_command()
581 rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device); in hisi_sas_queue_command()
588 slot = &hisi_hba->slot_info[rc]; in hisi_sas_queue_command()
589 slot->n_elem = n_elem; in hisi_sas_queue_command()
590 slot->n_elem_dif = n_elem_dif; in hisi_sas_queue_command()
591 slot->task = task; in hisi_sas_queue_command()
592 slot->port = port; in hisi_sas_queue_command()
594 slot->tmf = task->tmf; in hisi_sas_queue_command()
595 slot->is_internal = !!task->tmf || internal_abort; in hisi_sas_queue_command()
603 if (!sas_protocol_ata(task->task_proto)) in hisi_sas_queue_command()
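/*
 * hisi_sas_queue_command(): rejects I/O to gone or not-ready devices,
 * picks a delivery queue (from the command's blk-mq hw queue, or from the
 * current CPU's queue map when there is no scsi_cmnd), DMA-maps the data
 * and protection scatterlists, allocates a slot index, and hands the slot
 * to hisi_sas_task_deliver().
 */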
615 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_bytes_dmaed() local
616 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_bytes_dmaed()
618 if (!phy->phy_attached) in hisi_sas_bytes_dmaed()
623 if (sas_phy->phy) { in hisi_sas_bytes_dmaed()
624 struct sas_phy *sphy = sas_phy->phy; in hisi_sas_bytes_dmaed()
626 sphy->negotiated_linkrate = sas_phy->linkrate; in hisi_sas_bytes_dmaed()
627 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; in hisi_sas_bytes_dmaed()
628 sphy->maximum_linkrate_hw = in hisi_sas_bytes_dmaed()
629 hisi_hba->hw->phy_get_max_linkrate(); in hisi_sas_bytes_dmaed()
630 if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) in hisi_sas_bytes_dmaed()
631 sphy->minimum_linkrate = phy->minimum_linkrate; in hisi_sas_bytes_dmaed()
633 if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) in hisi_sas_bytes_dmaed()
634 sphy->maximum_linkrate = phy->maximum_linkrate; in hisi_sas_bytes_dmaed()
637 if (phy->phy_type & PORT_TYPE_SAS) { in hisi_sas_bytes_dmaed()
640 id = (struct sas_identify_frame *)phy->frame_rcvd; in hisi_sas_bytes_dmaed()
641 id->dev_type = phy->identify.device_type; in hisi_sas_bytes_dmaed()
642 id->initiator_bits = SAS_PROTOCOL_ALL; in hisi_sas_bytes_dmaed()
643 id->target_bits = phy->identify.target_port_protocols; in hisi_sas_bytes_dmaed()
644 } else if (phy->phy_type & PORT_TYPE_SATA) { in hisi_sas_bytes_dmaed()
648 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; in hisi_sas_bytes_dmaed()
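/*
 * Phy-up notification: negotiated and min/max link rates are copied to
 * the transport sas_phy, and the received identify frame (SAS) or FIS
 * (SATA) is set up in the frame_rcvd buffer handed to libsas.
 */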
652 static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) in hisi_sas_alloc_dev() argument
654 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_alloc_dev()
656 int last = hisi_hba->last_dev_id; in hisi_sas_alloc_dev()
657 int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; in hisi_sas_alloc_dev()
660 spin_lock(&hisi_hba->lock); in hisi_sas_alloc_dev()
662 if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { in hisi_sas_alloc_dev()
663 int queue = i % hisi_hba->queue_count; in hisi_sas_alloc_dev()
664 struct hisi_sas_dq *dq = &hisi_hba->dq[queue]; in hisi_sas_alloc_dev()
666 hisi_hba->devices[i].device_id = i; in hisi_sas_alloc_dev()
667 sas_dev = &hisi_hba->devices[i]; in hisi_sas_alloc_dev()
668 sas_dev->dev_status = HISI_SAS_DEV_INIT; in hisi_sas_alloc_dev()
669 sas_dev->dev_type = device->dev_type; in hisi_sas_alloc_dev()
670 sas_dev->hisi_hba = hisi_hba; in hisi_sas_alloc_dev()
671 sas_dev->sas_device = device; in hisi_sas_alloc_dev()
672 sas_dev->dq = dq; in hisi_sas_alloc_dev()
673 spin_lock_init(&sas_dev->lock); in hisi_sas_alloc_dev()
674 INIT_LIST_HEAD(&hisi_hba->devices[i].list); in hisi_sas_alloc_dev()
679 hisi_hba->last_dev_id = i; in hisi_sas_alloc_dev()
680 spin_unlock(&hisi_hba->lock); in hisi_sas_alloc_dev()
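/*
 * Device allocation: hisi_hba->devices[] is scanned round-robin starting
 * after last_dev_id; the first SAS_PHY_UNUSED entry is claimed and bound
 * to a delivery queue chosen as device_id modulo queue_count.
 */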
687 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_tmf_aborted()
688 struct domain_device *device = task->dev; in hisi_sas_tmf_aborted() local
689 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_tmf_aborted()
690 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; in hisi_sas_tmf_aborted()
694 &hisi_hba->cq[slot->dlvry_queue]; in hisi_sas_tmf_aborted()
699 synchronize_irq(cq->irq_no); in hisi_sas_tmf_aborted()
700 slot->task = NULL; in hisi_sas_tmf_aborted()
705 static int hisi_sas_init_device(struct domain_device *device) in hisi_sas_init_device() argument
710 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_init_device()
712 switch (device->dev_type) { in hisi_sas_init_device()
716 while (retry-- > 0) { in hisi_sas_init_device()
717 rc = sas_clear_task_set(device, lun.scsi_lun); in hisi_sas_init_device()
719 hisi_sas_release_task(hisi_hba, device); in hisi_sas_init_device()
735 * a. When probing the device, libsas/libata already issues a in hisi_sas_init_device()
736 * hard reset in sas_probe_sata() -> ata_sas_async_probe(). in hisi_sas_init_device()
741 while (retry-- > 0) { in hisi_sas_init_device()
742 rc = hisi_sas_softreset_ata_disk(device); in hisi_sas_init_device()
757 struct hisi_sas_device *sas_dev = ddev->lldd_dev; in hisi_sas_slave_alloc()
767 sas_dev->dev_status = HISI_SAS_DEV_NORMAL; in hisi_sas_slave_alloc()
772 static int hisi_sas_dev_found(struct domain_device *device) in hisi_sas_dev_found() argument
774 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_dev_found()
775 struct domain_device *parent_dev = device->parent; in hisi_sas_dev_found()
777 struct device *dev = hisi_hba->dev; in hisi_sas_dev_found()
780 if (hisi_hba->hw->alloc_dev) in hisi_sas_dev_found()
781 sas_dev = hisi_hba->hw->alloc_dev(device); in hisi_sas_dev_found()
783 sas_dev = hisi_sas_alloc_dev(device); in hisi_sas_dev_found()
787 return -EINVAL; in hisi_sas_dev_found()
790 device->lldd_dev = sas_dev; in hisi_sas_dev_found()
791 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); in hisi_sas_dev_found()
793 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { in hisi_sas_dev_found()
795 u8 phy_num = parent_dev->ex_dev.num_phys; in hisi_sas_dev_found()
796 struct ex_phy *phy; in hisi_sas_dev_found() local
799 phy = &parent_dev->ex_dev.ex_phy[phy_no]; in hisi_sas_dev_found()
800 if (SAS_ADDR(phy->attached_sas_addr) == in hisi_sas_dev_found()
801 SAS_ADDR(device->sas_addr)) in hisi_sas_dev_found()
808 SAS_ADDR(device->sas_addr), in hisi_sas_dev_found()
809 SAS_ADDR(parent_dev->sas_addr)); in hisi_sas_dev_found()
810 rc = -EINVAL; in hisi_sas_dev_found()
816 sas_dev->device_id, sas_dev->dev_type); in hisi_sas_dev_found()
821 hisi_sas_dev_gone(device); in hisi_sas_dev_found()
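/*
 * hisi_sas_dev_found(): allocates a hisi_sas_device (using the hw-specific
 * allocator if one is provided), programs the ITCT, and for
 * expander-attached devices checks that the parent expander really has a
 * phy attached to this SAS address; on failure the device is torn down
 * again via hisi_sas_dev_gone().
 */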
843 hisi_hba->hw->phys_init(hisi_hba); in hisi_sas_scan_start()
850 struct sas_ha_struct *sha = &hisi_hba->sha; in hisi_sas_scan_finished()
852 /* Wait for PHY up interrupt to occur */ in hisi_sas_scan_finished()
864 struct hisi_sas_phy *phy = in hisi_sas_phyup_work_common() local
865 container_of(work, typeof(*phy), works[event]); in hisi_sas_phyup_work_common()
866 struct hisi_hba *hisi_hba = phy->hisi_hba; in hisi_sas_phyup_work_common()
867 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phyup_work_common()
868 int phy_no = sas_phy->id; in hisi_sas_phyup_work_common()
870 phy->wait_phyup_cnt = 0; in hisi_sas_phyup_work_common()
871 if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) in hisi_sas_phyup_work_common()
872 hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); in hisi_sas_phyup_work_common()
883 struct hisi_sas_phy *phy = in hisi_sas_linkreset_work() local
884 container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]); in hisi_sas_linkreset_work()
885 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_linkreset_work()
892 struct hisi_sas_phy *phy = in hisi_sas_phyup_pm_work() local
893 container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]); in hisi_sas_phyup_pm_work()
894 struct hisi_hba *hisi_hba = phy->hisi_hba; in hisi_sas_phyup_pm_work()
895 struct device *dev = hisi_hba->dev; in hisi_sas_phyup_pm_work()
907 bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, in hisi_sas_notify_phy_event() argument
910 struct hisi_hba *hisi_hba = phy->hisi_hba; in hisi_sas_notify_phy_event()
915 return queue_work(hisi_hba->wq, &phy->works[event]); in hisi_sas_notify_phy_event()
921 struct hisi_sas_phy *phy = from_timer(phy, t, timer); in hisi_sas_wait_phyup_timedout() local
922 struct hisi_hba *hisi_hba = phy->hisi_hba; in hisi_sas_wait_phyup_timedout()
923 struct device *dev = hisi_hba->dev; in hisi_sas_wait_phyup_timedout()
924 int phy_no = phy->sas_phy.id; in hisi_sas_wait_phyup_timedout()
926 dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no); in hisi_sas_wait_phyup_timedout()
927 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); in hisi_sas_wait_phyup_timedout()
934 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_phy_oob_ready() local
935 struct device *dev = hisi_hba->dev; in hisi_sas_phy_oob_ready()
938 dev_dbg(dev, "phy%d OOB ready\n", phy_no); in hisi_sas_phy_oob_ready()
939 spin_lock_irqsave(&phy->lock, flags); in hisi_sas_phy_oob_ready()
940 if (phy->phy_attached) { in hisi_sas_phy_oob_ready()
941 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_phy_oob_ready()
945 if (!timer_pending(&phy->timer)) { in hisi_sas_phy_oob_ready()
946 if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) { in hisi_sas_phy_oob_ready()
947 phy->wait_phyup_cnt++; in hisi_sas_phy_oob_ready()
948 phy->timer.expires = jiffies + in hisi_sas_phy_oob_ready()
950 add_timer(&phy->timer); in hisi_sas_phy_oob_ready()
951 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_phy_oob_ready()
955 dev_warn(dev, "phy%d failed to come up %d times, giving up\n", in hisi_sas_phy_oob_ready()
956 phy_no, phy->wait_phyup_cnt); in hisi_sas_phy_oob_ready()
957 phy->wait_phyup_cnt = 0; in hisi_sas_phy_oob_ready()
959 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_phy_oob_ready()
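/*
 * OOB handling: if the phy signals OOB but has not attached yet, a phy-up
 * wait timer is armed; hisi_sas_wait_phyup_timedout() requests a link
 * reset on expiry, and after HISI_SAS_WAIT_PHYUP_RETRIES attempts the
 * driver gives up waiting.
 */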
966 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_phy_init() local
967 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phy_init()
970 phy->hisi_hba = hisi_hba; in hisi_sas_phy_init()
971 phy->port = NULL; in hisi_sas_phy_init()
972 phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; in hisi_sas_phy_init()
973 phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate(); in hisi_sas_phy_init()
974 sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0; in hisi_sas_phy_init()
975 sas_phy->class = SAS; in hisi_sas_phy_init()
976 sas_phy->iproto = SAS_PROTOCOL_ALL; in hisi_sas_phy_init()
977 sas_phy->tproto = 0; in hisi_sas_phy_init()
978 sas_phy->type = PHY_TYPE_PHYSICAL; in hisi_sas_phy_init()
979 sas_phy->role = PHY_ROLE_INITIATOR; in hisi_sas_phy_init()
980 sas_phy->oob_mode = OOB_NOT_CONNECTED; in hisi_sas_phy_init()
981 sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN; in hisi_sas_phy_init()
982 sas_phy->id = phy_no; in hisi_sas_phy_init()
983 sas_phy->sas_addr = &hisi_hba->sas_addr[0]; in hisi_sas_phy_init()
984 sas_phy->frame_rcvd = &phy->frame_rcvd[0]; in hisi_sas_phy_init()
985 sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata; in hisi_sas_phy_init()
986 sas_phy->lldd_phy = phy; in hisi_sas_phy_init()
989 INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]); in hisi_sas_phy_init()
991 spin_lock_init(&phy->lock); in hisi_sas_phy_init()
993 timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0); in hisi_sas_phy_init()
999 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_phy_enable() local
1000 struct asd_sas_phy *aphy = &phy->sas_phy; in hisi_sas_phy_enable()
1001 struct sas_phy *sphy = aphy->phy; in hisi_sas_phy_enable()
1004 spin_lock_irqsave(&phy->lock, flags); in hisi_sas_phy_enable()
1008 if (!phy->enable) in hisi_sas_phy_enable()
1009 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; in hisi_sas_phy_enable()
1010 hisi_hba->hw->phy_start(hisi_hba, phy_no); in hisi_sas_phy_enable()
1012 sphy->negotiated_linkrate = SAS_PHY_DISABLED; in hisi_sas_phy_enable()
1013 hisi_hba->hw->phy_disable(hisi_hba, phy_no); in hisi_sas_phy_enable()
1015 phy->enable = enable; in hisi_sas_phy_enable()
1016 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_phy_enable()
1022 struct sas_ha_struct *sas_ha = sas_phy->ha; in hisi_sas_port_notify_formed()
1023 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; in hisi_sas_port_notify_formed()
1024 struct hisi_sas_phy *phy = sas_phy->lldd_phy; in hisi_sas_port_notify_formed() local
1025 struct asd_sas_port *sas_port = sas_phy->port; in hisi_sas_port_notify_formed()
1033 spin_lock_irqsave(&hisi_hba->lock, flags); in hisi_sas_port_notify_formed()
1034 port->port_attached = 1; in hisi_sas_port_notify_formed()
1035 port->id = phy->port_id; in hisi_sas_port_notify_formed()
1036 phy->port = port; in hisi_sas_port_notify_formed()
1037 sas_port->lldd_port = port; in hisi_sas_port_notify_formed()
1038 spin_unlock_irqrestore(&hisi_hba->lock, flags); in hisi_sas_port_notify_formed()
1048 ts = &task->task_status; in hisi_sas_do_release_task()
1050 ts->resp = SAS_TASK_COMPLETE; in hisi_sas_do_release_task()
1051 ts->stat = SAS_ABORTED_TASK; in hisi_sas_do_release_task()
1052 spin_lock_irqsave(&task->task_state_lock, flags); in hisi_sas_do_release_task()
1053 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; in hisi_sas_do_release_task()
1054 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP) in hisi_sas_do_release_task()
1055 task->task_state_flags |= SAS_TASK_STATE_DONE; in hisi_sas_do_release_task()
1056 spin_unlock_irqrestore(&task->task_state_lock, flags); in hisi_sas_do_release_task()
1063 struct domain_device *device) in hisi_sas_release_task() argument
1066 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_release_task()
1068 list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) in hisi_sas_release_task()
1069 hisi_sas_do_release_task(hisi_hba, slot->task, slot); in hisi_sas_release_task()
1075 struct domain_device *device; in hisi_sas_release_tasks() local
1079 sas_dev = &hisi_hba->devices[i]; in hisi_sas_release_tasks()
1080 device = sas_dev->sas_device; in hisi_sas_release_tasks()
1082 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || in hisi_sas_release_tasks()
1083 !device) in hisi_sas_release_tasks()
1086 hisi_sas_release_task(hisi_hba, device); in hisi_sas_release_tasks()
1092 struct domain_device *device) in hisi_sas_dereg_device() argument
1094 if (hisi_hba->hw->dereg_device) in hisi_sas_dereg_device()
1095 hisi_hba->hw->dereg_device(hisi_hba, device); in hisi_sas_dereg_device()
1103 struct domain_device *device = sas_dev->sas_device; in hisi_sas_internal_task_abort_dev() local
1104 struct hisi_hba *hisi_hba = sas_dev->hisi_hba; in hisi_sas_internal_task_abort_dev()
1107 for (i = 0; i < hisi_hba->cq_nvecs; i++) { in hisi_sas_internal_task_abort_dev()
1108 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; in hisi_sas_internal_task_abort_dev()
1109 const struct cpumask *mask = cq->irq_mask; in hisi_sas_internal_task_abort_dev()
1113 rc = sas_execute_internal_abort_dev(device, i, &data); in hisi_sas_internal_task_abort_dev()
1121 static void hisi_sas_dev_gone(struct domain_device *device) in hisi_sas_dev_gone() argument
1123 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_dev_gone()
1124 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_dev_gone()
1125 struct device *dev = hisi_hba->dev; in hisi_sas_dev_gone()
1129 sas_dev->device_id, sas_dev->dev_type); in hisi_sas_dev_gone()
1131 down(&hisi_hba->sem); in hisi_sas_dev_gone()
1132 if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { in hisi_sas_dev_gone()
1135 hisi_sas_dereg_device(hisi_hba, device); in hisi_sas_dev_gone()
1137 ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); in hisi_sas_dev_gone()
1138 device->lldd_dev = NULL; in hisi_sas_dev_gone()
1141 if (hisi_hba->hw->free_device) in hisi_sas_dev_gone()
1142 hisi_hba->hw->free_device(sas_dev); in hisi_sas_dev_gone()
1146 sas_dev->dev_type = SAS_PHY_UNUSED; in hisi_sas_dev_gone()
1147 sas_dev->sas_device = NULL; in hisi_sas_dev_gone()
1148 up(&hisi_hba->sem); in hisi_sas_dev_gone()
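/*
 * hisi_sas_dev_gone(): unless a controller reset is in progress,
 * outstanding commands for the device are aborted, the device is
 * deregistered and its ITCT cleared, and the software state is released
 * with the entry marked SAS_PHY_UNUSED again.
 */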
1156 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_phy_set_linkrate() local
1157 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phy_set_linkrate()
1160 if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS) in hisi_sas_phy_set_linkrate()
1161 return -EINVAL; in hisi_sas_phy_set_linkrate()
1163 if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) { in hisi_sas_phy_set_linkrate()
1164 max = sas_phy->phy->maximum_linkrate; in hisi_sas_phy_set_linkrate()
1165 min = r->minimum_linkrate; in hisi_sas_phy_set_linkrate()
1166 } else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) { in hisi_sas_phy_set_linkrate()
1167 max = r->maximum_linkrate; in hisi_sas_phy_set_linkrate()
1168 min = sas_phy->phy->minimum_linkrate; in hisi_sas_phy_set_linkrate()
1170 return -EINVAL; in hisi_sas_phy_set_linkrate()
1175 sas_phy->phy->maximum_linkrate = max; in hisi_sas_phy_set_linkrate()
1176 sas_phy->phy->minimum_linkrate = min; in hisi_sas_phy_set_linkrate()
1180 hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r); in hisi_sas_phy_set_linkrate()
1189 struct hisi_sas_phy *phy = container_of(sas_phy, in hisi_sas_control_phy() local
1191 struct sas_ha_struct *sas_ha = sas_phy->ha; in hisi_sas_control_phy()
1192 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; in hisi_sas_control_phy()
1193 struct device *dev = hisi_hba->dev; in hisi_sas_control_phy()
1195 int phy_no = sas_phy->id; in hisi_sas_control_phy()
1196 u8 sts = phy->phy_attached; in hisi_sas_control_phy()
1199 down(&hisi_hba->sem); in hisi_sas_control_phy()
1200 phy->reset_completion = &completion; in hisi_sas_control_phy()
1204 hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no); in hisi_sas_control_phy()
1222 if (hisi_hba->hw->get_events) { in hisi_sas_control_phy()
1223 hisi_hba->hw->get_events(hisi_hba, phy_no); in hisi_sas_control_phy()
1229 ret = -EOPNOTSUPP; in hisi_sas_control_phy()
1235 dev_warn(dev, "phy%d wait phyup timed out for func %d\n", in hisi_sas_control_phy()
1237 if (phy->in_reset) in hisi_sas_control_phy()
1238 ret = -ETIMEDOUT; in hisi_sas_control_phy()
1242 phy->reset_completion = NULL; in hisi_sas_control_phy()
1244 up(&hisi_hba->sem); in hisi_sas_control_phy()
1262 static int hisi_sas_softreset_ata_disk(struct domain_device *device) in hisi_sas_softreset_ata_disk() argument
1265 struct ata_port *ap = device->sata_dev.ap; in hisi_sas_softreset_ata_disk()
1268 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_softreset_ata_disk()
1269 struct device *dev = hisi_hba->dev; in hisi_sas_softreset_ata_disk()
1274 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); in hisi_sas_softreset_ata_disk()
1275 rc = sas_execute_ata_cmd(device, fis, -1); in hisi_sas_softreset_ata_disk()
1284 hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); in hisi_sas_softreset_ata_disk()
1285 rc = sas_execute_ata_cmd(device, fis, -1); in hisi_sas_softreset_ata_disk()
1287 dev_err(dev, "ata disk %016llx de-reset failed\n", in hisi_sas_softreset_ata_disk()
1288 SAS_ADDR(device->sas_addr)); in hisi_sas_softreset_ata_disk()
1292 SAS_ADDR(device->sas_addr)); in hisi_sas_softreset_ata_disk()
1296 hisi_sas_release_task(hisi_hba, device); in hisi_sas_softreset_ata_disk()
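/*
 * ATA soft reset: a register FIS with SRST set is sent, followed by one
 * with SRST cleared, both via sas_execute_ata_cmd(); on success any tasks
 * still held for the disk are released.
 */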
1303 u32 state = hisi_hba->hw->get_phys_state(hisi_hba); in hisi_sas_refresh_port_id()
1307 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; in hisi_sas_refresh_port_id()
1308 struct domain_device *device = sas_dev->sas_device; in hisi_sas_refresh_port_id() local
1311 struct hisi_sas_phy *phy = NULL; in hisi_sas_refresh_port_id() local
1314 if ((sas_dev->dev_type == SAS_PHY_UNUSED) in hisi_sas_refresh_port_id()
1315 || !device || !device->port) in hisi_sas_refresh_port_id()
1318 sas_port = device->port; in hisi_sas_refresh_port_id()
1321 spin_lock(&sas_port->phy_list_lock); in hisi_sas_refresh_port_id()
1322 list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) in hisi_sas_refresh_port_id()
1323 if (state & BIT(sas_phy->id)) { in hisi_sas_refresh_port_id()
1324 phy = sas_phy->lldd_phy; in hisi_sas_refresh_port_id()
1327 spin_unlock(&sas_port->phy_list_lock); in hisi_sas_refresh_port_id()
1329 if (phy) { in hisi_sas_refresh_port_id()
1330 port->id = phy->port_id; in hisi_sas_refresh_port_id()
1332 /* Update linkrate of directly attached device. */ in hisi_sas_refresh_port_id()
1333 if (!device->parent) in hisi_sas_refresh_port_id()
1334 device->linkrate = phy->sas_phy.linkrate; in hisi_sas_refresh_port_id()
1336 hisi_hba->hw->setup_itct(hisi_hba, sas_dev); in hisi_sas_refresh_port_id()
1338 port->id = 0xff; in hisi_sas_refresh_port_id()
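/*
 * After a reset, each registered device's port is re-bound to the first
 * phy of that port which is still up: the port id and, for directly
 * attached devices, the link rate are refreshed and the ITCT is
 * re-programmed; ports with no live phy get id 0xff.
 */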
1344 struct sas_ha_struct *sas_ha = &hisi_hba->sha; in hisi_sas_rescan_topology()
1348 for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { in hisi_sas_rescan_topology()
1349 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_rescan_topology() local
1350 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_rescan_topology()
1351 struct asd_sas_port *sas_port = sas_phy->port; in hisi_sas_rescan_topology()
1354 if (!sas_phy->phy->enabled) in hisi_sas_rescan_topology()
1357 /* Report PHY state change to libsas */ in hisi_sas_rescan_topology()
1359 if (do_port_check && sas_port && sas_port->port_dev) { in hisi_sas_rescan_topology()
1360 struct domain_device *dev = sas_port->port_dev; in hisi_sas_rescan_topology()
1364 if (dev_is_expander(dev->dev_type)) in hisi_sas_rescan_topology()
1375 * reset calls from hisi_sas_clear_nexus_ha() -> in hisi_sas_rescan_topology()
1384 struct domain_device *device; in hisi_sas_reset_init_all_devices() local
1388 sas_dev = &hisi_hba->devices[i]; in hisi_sas_reset_init_all_devices()
1389 device = sas_dev->sas_device; in hisi_sas_reset_init_all_devices()
1391 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) in hisi_sas_reset_init_all_devices()
1394 hisi_sas_init_device(device); in hisi_sas_reset_init_all_devices()
1400 struct domain_device *device) in hisi_sas_send_ata_reset_each_phy() argument
1402 struct ata_port *ap = device->sata_dev.ap; in hisi_sas_send_ata_reset_each_phy()
1403 struct device *dev = hisi_hba->dev; in hisi_sas_send_ata_reset_each_phy()
1409 for (i = 0; i < hisi_hba->n_phy; i++) { in hisi_sas_send_ata_reset_each_phy()
1410 if (!(sas_port->phy_mask & BIT(i))) in hisi_sas_send_ata_reset_each_phy()
1416 hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); in hisi_sas_send_ata_reset_each_phy()
1417 rc = sas_execute_ata_cmd(device, fis, i); in hisi_sas_send_ata_reset_each_phy()
1419 dev_err(dev, "phy%d ata reset failed rc=%d\n", in hisi_sas_send_ata_reset_each_phy()
1429 struct device *dev = hisi_hba->dev; in hisi_sas_terminate_stp_reject()
1433 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; in hisi_sas_terminate_stp_reject()
1434 struct domain_device *device = sas_dev->sas_device; in hisi_sas_terminate_stp_reject() local
1436 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device) in hisi_sas_terminate_stp_reject()
1444 for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) { in hisi_sas_terminate_stp_reject()
1445 struct hisi_sas_port *port = &hisi_hba->port[port_no]; in hisi_sas_terminate_stp_reject()
1446 struct asd_sas_port *sas_port = &port->sas_port; in hisi_sas_terminate_stp_reject()
1447 struct domain_device *port_dev = sas_port->port_dev; in hisi_sas_terminate_stp_reject()
1448 struct domain_device *device; in hisi_sas_terminate_stp_reject() local
1450 if (!port_dev || !dev_is_expander(port_dev->dev_type)) in hisi_sas_terminate_stp_reject()
1453 /* Try to find a SATA device */ in hisi_sas_terminate_stp_reject()
1454 list_for_each_entry(device, &sas_port->dev_list, in hisi_sas_terminate_stp_reject()
1456 if (dev_is_sata(device)) { in hisi_sas_terminate_stp_reject()
1459 device); in hisi_sas_terminate_stp_reject()
1468 struct Scsi_Host *shost = hisi_hba->shost; in hisi_sas_controller_reset_prepare()
1470 hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba); in hisi_sas_controller_reset_prepare()
1473 hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000); in hisi_sas_controller_reset_prepare()
1475 del_timer_sync(&hisi_hba->timer); in hisi_sas_controller_reset_prepare()
1477 set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); in hisi_sas_controller_reset_prepare()
1483 struct Scsi_Host *shost = hisi_hba->shost; in hisi_sas_controller_reset_done()
1486 hisi_hba->hw->phys_init(hisi_hba); in hisi_sas_controller_reset_done()
1489 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); in hisi_sas_controller_reset_done()
1491 if (hisi_hba->reject_stp_links_msk) in hisi_sas_controller_reset_done()
1495 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); in hisi_sas_controller_reset_done()
1496 up(&hisi_hba->sem); in hisi_sas_controller_reset_done()
1498 hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state); in hisi_sas_controller_reset_done()
1504 if (!hisi_hba->hw->soft_reset) in hisi_sas_controller_prereset()
1505 return -1; in hisi_sas_controller_prereset()
1507 down(&hisi_hba->sem); in hisi_sas_controller_prereset()
1508 if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) { in hisi_sas_controller_prereset()
1509 up(&hisi_hba->sem); in hisi_sas_controller_prereset()
1510 return -1; in hisi_sas_controller_prereset()
1513 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) in hisi_sas_controller_prereset()
1514 hisi_hba->hw->debugfs_snapshot_regs(hisi_hba); in hisi_sas_controller_prereset()
1521 struct device *dev = hisi_hba->dev; in hisi_sas_controller_reset()
1522 struct Scsi_Host *shost = hisi_hba->shost; in hisi_sas_controller_reset()
1528 rc = hisi_hba->hw->soft_reset(hisi_hba); in hisi_sas_controller_reset()
1531 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); in hisi_sas_controller_reset()
1532 up(&hisi_hba->sem); in hisi_sas_controller_reset()
1534 clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags); in hisi_sas_controller_reset()
1537 clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); in hisi_sas_controller_reset()
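/*
 * Controller reset flow: prereset takes hisi_hba->sem and sets
 * HISI_SAS_RESETTING_BIT; prepare saves the phy state, waits for in-flight
 * commands to drain and sets HISI_SAS_REJECT_CMD_BIT; hw->soft_reset()
 * performs the actual reset; done re-initialises the phys, clears the
 * flags and rescans the topology against the saved phy state.
 */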
1548 struct domain_device *device = task->dev; in hisi_sas_abort_task() local
1549 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_abort_task()
1551 struct device *dev; in hisi_sas_abort_task()
1558 hisi_hba = dev_to_hisi_hba(task->dev); in hisi_sas_abort_task()
1559 dev = hisi_hba->dev; in hisi_sas_abort_task()
1561 spin_lock_irqsave(&task->task_state_lock, flags); in hisi_sas_abort_task()
1562 if (task->task_state_flags & SAS_TASK_STATE_DONE) { in hisi_sas_abort_task()
1563 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_abort_task()
1571 cq = &hisi_hba->cq[slot->dlvry_queue]; in hisi_sas_abort_task()
1572 synchronize_irq(cq->irq_no); in hisi_sas_abort_task()
1574 spin_unlock_irqrestore(&task->task_state_lock, flags); in hisi_sas_abort_task()
1578 task->task_state_flags |= SAS_TASK_STATE_ABORTED; in hisi_sas_abort_task()
1579 spin_unlock_irqrestore(&task->task_state_lock, flags); in hisi_sas_abort_task()
1581 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { in hisi_sas_abort_task()
1582 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_abort_task()
1583 u16 tag = slot->idx; in hisi_sas_abort_task()
1587 rc2 = sas_execute_internal_abort_single(device, tag, in hisi_sas_abort_task()
1588 slot->dlvry_queue, &internal_abort_data); in hisi_sas_abort_task()
1595 * If the TMF finds that the IO is not in the device and also in hisi_sas_abort_task()
1602 if (task->lldd_task) in hisi_sas_abort_task()
1605 } else if (task->task_proto & SAS_PROTOCOL_SATA || in hisi_sas_abort_task()
1606 task->task_proto & SAS_PROTOCOL_STP) { in hisi_sas_abort_task()
1607 if (task->dev->dev_type == SAS_SATA_DEV) { in hisi_sas_abort_task()
1613 hisi_sas_dereg_device(hisi_hba, device); in hisi_sas_abort_task()
1614 rc = hisi_sas_softreset_ata_disk(device); in hisi_sas_abort_task()
1616 } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) { in hisi_sas_abort_task()
1618 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_abort_task()
1619 u32 tag = slot->idx; in hisi_sas_abort_task()
1620 struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; in hisi_sas_abort_task()
1622 rc = sas_execute_internal_abort_single(device, in hisi_sas_abort_task()
1623 tag, slot->dlvry_queue, in hisi_sas_abort_task()
1626 task->lldd_task) { in hisi_sas_abort_task()
1631 synchronize_irq(cq->irq_no); in hisi_sas_abort_task()
1632 slot->task = NULL; in hisi_sas_abort_task()
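/*
 * hisi_sas_abort_task(): SSP tasks are aborted with a TMF ABORT TASK
 * combined with an internal abort of the slot's tag; SATA/STP devices
 * fall back to an ATA soft reset after aborting and deregistering the
 * device; SMP tasks are aborted individually by tag.
 */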
1642 static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun) in hisi_sas_abort_task_set() argument
1644 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_abort_task_set()
1645 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_abort_task_set()
1646 struct device *dev = hisi_hba->dev; in hisi_sas_abort_task_set()
1654 hisi_sas_dereg_device(hisi_hba, device); in hisi_sas_abort_task_set()
1656 rc = sas_abort_task_set(device, lun); in hisi_sas_abort_task_set()
1658 hisi_sas_release_task(hisi_hba, device); in hisi_sas_abort_task_set()
1663 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) in hisi_sas_debug_I_T_nexus_reset() argument
1665 struct sas_phy *local_phy = sas_get_local_phy(device); in hisi_sas_debug_I_T_nexus_reset()
1666 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_debug_I_T_nexus_reset()
1667 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_debug_I_T_nexus_reset()
1668 struct sas_ha_struct *sas_ha = &hisi_hba->sha; in hisi_sas_debug_I_T_nexus_reset()
1671 if (!local_phy->enabled) { in hisi_sas_debug_I_T_nexus_reset()
1673 return -ENODEV; in hisi_sas_debug_I_T_nexus_reset()
1678 sas_ha->sas_phy[local_phy->number]; in hisi_sas_debug_I_T_nexus_reset()
1679 struct hisi_sas_phy *phy = in hisi_sas_debug_I_T_nexus_reset() local
1683 spin_lock_irqsave(&phy->lock, flags); in hisi_sas_debug_I_T_nexus_reset()
1684 phy->in_reset = 1; in hisi_sas_debug_I_T_nexus_reset()
1685 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_debug_I_T_nexus_reset()
1688 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || in hisi_sas_debug_I_T_nexus_reset()
1689 !dev_is_sata(device)) ? true : false; in hisi_sas_debug_I_T_nexus_reset()
1696 sas_ha->sas_phy[local_phy->number]; in hisi_sas_debug_I_T_nexus_reset()
1697 struct hisi_sas_phy *phy = in hisi_sas_debug_I_T_nexus_reset() local
1701 spin_lock_irqsave(&phy->lock, flags); in hisi_sas_debug_I_T_nexus_reset()
1702 phy->in_reset = 0; in hisi_sas_debug_I_T_nexus_reset()
1703 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_debug_I_T_nexus_reset()
1705 /* report PHY down if timed out */ in hisi_sas_debug_I_T_nexus_reset()
1706 if (rc == -ETIMEDOUT) in hisi_sas_debug_I_T_nexus_reset()
1707 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL); in hisi_sas_debug_I_T_nexus_reset()
1714 /* Remote phy */ in hisi_sas_debug_I_T_nexus_reset()
1715 if (dev_is_sata(device)) { in hisi_sas_debug_I_T_nexus_reset()
1716 rc = sas_ata_wait_after_reset(device, in hisi_sas_debug_I_T_nexus_reset()
1725 static int hisi_sas_I_T_nexus_reset(struct domain_device *device) in hisi_sas_I_T_nexus_reset() argument
1727 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_I_T_nexus_reset()
1728 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_I_T_nexus_reset()
1729 struct device *dev = hisi_hba->dev; in hisi_sas_I_T_nexus_reset()
1737 hisi_sas_dereg_device(hisi_hba, device); in hisi_sas_I_T_nexus_reset()
1739 rc = hisi_sas_debug_I_T_nexus_reset(device); in hisi_sas_I_T_nexus_reset()
1740 if (rc == TMF_RESP_FUNC_COMPLETE && dev_is_sata(device)) { in hisi_sas_I_T_nexus_reset()
1743 rc = hisi_sas_softreset_ata_disk(device); in hisi_sas_I_T_nexus_reset()
1745 case -ECOMM: in hisi_sas_I_T_nexus_reset()
1746 rc = -ENODEV; in hisi_sas_I_T_nexus_reset()
1749 case -EMSGSIZE: in hisi_sas_I_T_nexus_reset()
1750 case -EIO: in hisi_sas_I_T_nexus_reset()
1751 local_phy = sas_get_local_phy(device); in hisi_sas_I_T_nexus_reset()
1754 local_phy->enabled = 0; in hisi_sas_I_T_nexus_reset()
1755 dev_err(dev, "Disabled local phy of ATA disk %016llx due to softreset fail (%d)\n", in hisi_sas_I_T_nexus_reset()
1756 SAS_ADDR(device->sas_addr), rc); in hisi_sas_I_T_nexus_reset()
1757 rc = -ENODEV; in hisi_sas_I_T_nexus_reset()
1766 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) in hisi_sas_I_T_nexus_reset()
1767 hisi_sas_release_task(hisi_hba, device); in hisi_sas_I_T_nexus_reset()
1772 static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) in hisi_sas_lu_reset() argument
1774 struct hisi_sas_device *sas_dev = device->lldd_dev; in hisi_sas_lu_reset()
1775 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_lu_reset()
1776 struct device *dev = hisi_hba->dev; in hisi_sas_lu_reset()
1785 hisi_sas_dereg_device(hisi_hba, device); in hisi_sas_lu_reset()
1787 if (dev_is_sata(device)) { in hisi_sas_lu_reset()
1788 struct sas_phy *phy; in hisi_sas_lu_reset() local
1790 phy = sas_get_local_phy(device); in hisi_sas_lu_reset()
1792 rc = sas_phy_reset(phy, true); in hisi_sas_lu_reset()
1795 hisi_sas_release_task(hisi_hba, device); in hisi_sas_lu_reset()
1796 sas_put_local_phy(phy); in hisi_sas_lu_reset()
1798 rc = sas_lu_reset(device, lun); in hisi_sas_lu_reset()
1800 hisi_sas_release_task(hisi_hba, device); in hisi_sas_lu_reset()
1804 dev_err(dev, "lu_reset: for device[%d]:rc= %d\n", in hisi_sas_lu_reset()
1805 sas_dev->device_id, rc); in hisi_sas_lu_reset()
1811 struct domain_device *device = data; in hisi_sas_async_I_T_nexus_reset() local
1812 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_async_I_T_nexus_reset()
1815 rc = hisi_sas_debug_I_T_nexus_reset(device); in hisi_sas_async_I_T_nexus_reset()
1817 dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n", in hisi_sas_async_I_T_nexus_reset()
1818 SAS_ADDR(device->sas_addr), rc); in hisi_sas_async_I_T_nexus_reset()
1823 struct hisi_hba *hisi_hba = sas_ha->lldd_ha; in hisi_sas_clear_nexus_ha()
1828 queue_work(hisi_hba->wq, &r.work); in hisi_sas_clear_nexus_ha()
1836 struct hisi_sas_device *sas_dev = &hisi_hba->devices[i]; in hisi_sas_clear_nexus_ha()
1837 struct domain_device *device = sas_dev->sas_device; in hisi_sas_clear_nexus_ha() local
1839 if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device || in hisi_sas_clear_nexus_ha()
1840 dev_is_expander(device->dev_type)) in hisi_sas_clear_nexus_ha()
1844 device, &async); in hisi_sas_clear_nexus_ha()
1859 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { in hisi_sas_query_task()
1860 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_query_task()
1861 u32 tag = slot->idx; in hisi_sas_query_task()
1867 /* The task is not in Lun or failed, reset the phy */ in hisi_sas_query_task()
1882 struct domain_device *device = task->dev; in hisi_sas_internal_abort_timeout() local
1883 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); in hisi_sas_internal_abort_timeout()
1886 if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) in hisi_sas_internal_abort_timeout()
1887 queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); in hisi_sas_internal_abort_timeout()
1889 if (task->task_state_flags & SAS_TASK_STATE_DONE) { in hisi_sas_internal_abort_timeout()
1891 SAS_ADDR(device->sas_addr)); in hisi_sas_internal_abort_timeout()
1893 struct hisi_sas_slot *slot = task->lldd_task; in hisi_sas_internal_abort_timeout()
1895 set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags); in hisi_sas_internal_abort_timeout()
1899 &hisi_hba->cq[slot->dlvry_queue]; in hisi_sas_internal_abort_timeout()
1904 synchronize_irq(cq->irq_no); in hisi_sas_internal_abort_timeout()
1905 slot->task = NULL; in hisi_sas_internal_abort_timeout()
1908 if (timeout->rst_ha_timeout) { in hisi_sas_internal_abort_timeout()
1910 SAS_ADDR(device->sas_addr)); in hisi_sas_internal_abort_timeout()
1911 queue_work(hisi_hba->wq, &hisi_hba->rst_work); in hisi_sas_internal_abort_timeout()
1914 SAS_ADDR(device->sas_addr)); in hisi_sas_internal_abort_timeout()
1931 struct hisi_hba *hisi_hba = sha->lldd_ha; in hisi_sas_write_gpio()
1933 if (!hisi_hba->hw->write_gpio) in hisi_sas_write_gpio()
1934 return -EOPNOTSUPP; in hisi_sas_write_gpio()
1936 return hisi_hba->hw->write_gpio(hisi_hba, reg_type, in hisi_sas_write_gpio()
1940 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) in hisi_sas_phy_disconnected() argument
1942 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phy_disconnected()
1943 struct sas_phy *sphy = sas_phy->phy; in hisi_sas_phy_disconnected()
1946 phy->phy_attached = 0; in hisi_sas_phy_disconnected()
1947 phy->phy_type = 0; in hisi_sas_phy_disconnected()
1948 phy->port = NULL; in hisi_sas_phy_disconnected()
1950 spin_lock_irqsave(&phy->lock, flags); in hisi_sas_phy_disconnected()
1951 if (phy->enable) in hisi_sas_phy_disconnected()
1952 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; in hisi_sas_phy_disconnected()
1954 sphy->negotiated_linkrate = SAS_PHY_DISABLED; in hisi_sas_phy_disconnected()
1955 spin_unlock_irqrestore(&phy->lock, flags); in hisi_sas_phy_disconnected()
1961 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; in hisi_sas_phy_down() local
1962 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phy_down()
1963 struct device *dev = hisi_hba->dev; in hisi_sas_phy_down()
1966 /* Phy down but ready */ in hisi_sas_phy_down()
1970 struct hisi_sas_port *port = phy->port; in hisi_sas_phy_down()
1972 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) || in hisi_sas_phy_down()
1973 phy->in_reset) { in hisi_sas_phy_down()
1974 dev_info(dev, "ignore flutter phy%d down\n", phy_no); in hisi_sas_phy_down()
1977 /* Phy down and not ready */ in hisi_sas_phy_down()
1982 if (phy->phy_type & PORT_TYPE_SAS) { in hisi_sas_phy_down()
1983 int port_id = port->id; in hisi_sas_phy_down()
1985 if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba, in hisi_sas_phy_down()
1987 port->port_attached = 0; in hisi_sas_phy_down()
1988 } else if (phy->phy_type & PORT_TYPE_SATA) in hisi_sas_phy_down()
1989 port->port_attached = 0; in hisi_sas_phy_down()
1991 hisi_sas_phy_disconnected(phy); in hisi_sas_phy_down()
1996 void hisi_sas_phy_bcast(struct hisi_sas_phy *phy) in hisi_sas_phy_bcast() argument
1998 struct asd_sas_phy *sas_phy = &phy->sas_phy; in hisi_sas_phy_bcast()
1999 struct hisi_hba *hisi_hba = phy->hisi_hba; in hisi_sas_phy_bcast()
2000 struct sas_ha_struct *sha = &hisi_hba->sha; in hisi_sas_phy_bcast()
2002 if (test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) in hisi_sas_phy_bcast()
2005 if (test_bit(SAS_HA_FROZEN, &sha->state)) in hisi_sas_phy_bcast()
2016 for (i = 0; i < hisi_hba->cq_nvecs; i++) { in hisi_sas_sync_irqs()
2017 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; in hisi_sas_sync_irqs()
2019 synchronize_irq(cq->irq_no); in hisi_sas_sync_irqs()
2029 return -EOPNOTSUPP; in hisi_sas_host_reset()
2031 queue_work(hisi_hba->wq, &hisi_hba->rst_work); in hisi_sas_host_reset()
2060 struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; in hisi_sas_init_mem()
2062 for (i = 0; i < hisi_hba->queue_count; i++) { in hisi_sas_init_mem()
2063 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; in hisi_sas_init_mem()
2064 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; in hisi_sas_init_mem()
2065 struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i]; in hisi_sas_init_mem()
2071 dq->wr_point = 0; in hisi_sas_init_mem()
2073 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; in hisi_sas_init_mem()
2074 memset(hisi_hba->complete_hdr[i], 0, s); in hisi_sas_init_mem()
2075 cq->rd_point = 0; in hisi_sas_init_mem()
2078 s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy; in hisi_sas_init_mem()
2079 memset(hisi_hba->initial_fis, 0, s); in hisi_sas_init_mem()
2082 memset(hisi_hba->iost, 0, s); in hisi_sas_init_mem()
2085 memset(hisi_hba->breakpoint, 0, s); in hisi_sas_init_mem()
2095 struct device *dev = hisi_hba->dev; in hisi_sas_alloc()
2100 sema_init(&hisi_hba->sem, 1); in hisi_sas_alloc()
2101 spin_lock_init(&hisi_hba->lock); in hisi_sas_alloc()
2102 for (i = 0; i < hisi_hba->n_phy; i++) { in hisi_sas_alloc()
2104 hisi_hba->port[i].port_attached = 0; in hisi_sas_alloc()
2105 hisi_hba->port[i].id = -1; in hisi_sas_alloc()
2109 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; in hisi_sas_alloc()
2110 hisi_hba->devices[i].device_id = i; in hisi_sas_alloc()
2111 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT; in hisi_sas_alloc()
2114 for (i = 0; i < hisi_hba->queue_count; i++) { in hisi_sas_alloc()
2115 struct hisi_sas_cq *cq = &hisi_hba->cq[i]; in hisi_sas_alloc()
2116 struct hisi_sas_dq *dq = &hisi_hba->dq[i]; in hisi_sas_alloc()
2119 cq->id = i; in hisi_sas_alloc()
2120 cq->hisi_hba = hisi_hba; in hisi_sas_alloc()
2123 spin_lock_init(&dq->lock); in hisi_sas_alloc()
2124 INIT_LIST_HEAD(&dq->list); in hisi_sas_alloc()
2125 dq->id = i; in hisi_sas_alloc()
2126 dq->hisi_hba = hisi_hba; in hisi_sas_alloc()
2130 hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s, in hisi_sas_alloc()
2131 &hisi_hba->cmd_hdr_dma[i], in hisi_sas_alloc()
2133 if (!hisi_hba->cmd_hdr[i]) in hisi_sas_alloc()
2137 s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; in hisi_sas_alloc()
2138 hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s, in hisi_sas_alloc()
2139 &hisi_hba->complete_hdr_dma[i], in hisi_sas_alloc()
2141 if (!hisi_hba->complete_hdr[i]) in hisi_sas_alloc()
2146 hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, in hisi_sas_alloc()
2148 if (!hisi_hba->itct) in hisi_sas_alloc()
2151 hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries, in hisi_sas_alloc()
2154 if (!hisi_hba->slot_info) in hisi_sas_alloc()
2159 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK) in hisi_sas_alloc()
2181 slot = &hisi_hba->slot_info[slot_index]; in hisi_sas_alloc()
2182 slot->buf = buf; in hisi_sas_alloc()
2183 slot->buf_dma = buf_dma; in hisi_sas_alloc()
2184 slot->idx = slot_index; in hisi_sas_alloc()
2192 hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma, in hisi_sas_alloc()
2194 if (!hisi_hba->iost) in hisi_sas_alloc()
2198 hisi_hba->breakpoint = dmam_alloc_coherent(dev, s, in hisi_sas_alloc()
2199 &hisi_hba->breakpoint_dma, in hisi_sas_alloc()
2201 if (!hisi_hba->breakpoint) in hisi_sas_alloc()
2204 s = hisi_hba->slot_index_count = max_command_entries; in hisi_sas_alloc()
2205 hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL); in hisi_sas_alloc()
2206 if (!hisi_hba->slot_index_tags) in hisi_sas_alloc()
2210 hisi_hba->initial_fis = dmam_alloc_coherent(dev, s, in hisi_sas_alloc()
2211 &hisi_hba->initial_fis_dma, in hisi_sas_alloc()
2213 if (!hisi_hba->initial_fis) in hisi_sas_alloc()
2217 hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s, in hisi_sas_alloc()
2218 &hisi_hba->sata_breakpoint_dma, in hisi_sas_alloc()
2220 if (!hisi_hba->sata_breakpoint) in hisi_sas_alloc()
2223 hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; in hisi_sas_alloc()
2225 hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); in hisi_sas_alloc()
2226 if (!hisi_hba->wq) { in hisi_sas_alloc()
2233 return -ENOMEM; in hisi_sas_alloc()
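/*
 * hisi_sas_alloc(): per-queue command and completion headers, the ITCT,
 * IOST and breakpoint tables, initial FIS buffers, per-slot buffers and
 * the tag bitmap are allocated with managed (devm/dmam) helpers, and a
 * single-threaded workqueue is created; any failure unwinds to -ENOMEM.
 */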
2241 for (i = 0; i < hisi_hba->n_phy; i++) { in hisi_sas_free()
2242 struct hisi_sas_phy *phy = &hisi_hba->phy[i]; in hisi_sas_free() local
2244 del_timer_sync(&phy->timer); in hisi_sas_free()
2247 if (hisi_hba->wq) in hisi_sas_free()
2248 destroy_workqueue(hisi_hba->wq); in hisi_sas_free()
2269 if (hisi_sas_controller_prereset(rst->hisi_hba)) in hisi_sas_sync_rst_work_handler()
2272 if (!hisi_sas_controller_reset(rst->hisi_hba)) in hisi_sas_sync_rst_work_handler()
2273 rst->done = true; in hisi_sas_sync_rst_work_handler()
2275 complete(rst->completion); in hisi_sas_sync_rst_work_handler()
2281 struct device *dev = hisi_hba->dev; in hisi_sas_get_fw_info()
2282 struct platform_device *pdev = hisi_hba->platform_dev; in hisi_sas_get_fw_info()
2283 struct device_node *np = pdev ? pdev->dev.of_node : NULL; in hisi_sas_get_fw_info()
2286 if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr, in hisi_sas_get_fw_info()
2288 dev_err(dev, "could not get property sas-addr\n"); in hisi_sas_get_fw_info()
2289 return -ENOENT; in hisi_sas_get_fw_info()
2294 * These properties are only required for platform device-based in hisi_sas_get_fw_info()
2297 hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np, in hisi_sas_get_fw_info()
2298 "hisilicon,sas-syscon"); in hisi_sas_get_fw_info()
2299 if (IS_ERR(hisi_hba->ctrl)) { in hisi_sas_get_fw_info()
2301 return -ENOENT; in hisi_sas_get_fw_info()
2304 if (device_property_read_u32(dev, "ctrl-reset-reg", in hisi_sas_get_fw_info()
2305 &hisi_hba->ctrl_reset_reg)) { in hisi_sas_get_fw_info()
2306 dev_err(dev, "could not get property ctrl-reset-reg\n"); in hisi_sas_get_fw_info()
2307 return -ENOENT; in hisi_sas_get_fw_info()
2310 if (device_property_read_u32(dev, "ctrl-reset-sts-reg", in hisi_sas_get_fw_info()
2311 &hisi_hba->ctrl_reset_sts_reg)) { in hisi_sas_get_fw_info()
2312 dev_err(dev, "could not get property ctrl-reset-sts-reg\n"); in hisi_sas_get_fw_info()
2313 return -ENOENT; in hisi_sas_get_fw_info()
2316 if (device_property_read_u32(dev, "ctrl-clock-ena-reg", in hisi_sas_get_fw_info()
2317 &hisi_hba->ctrl_clock_ena_reg)) { in hisi_sas_get_fw_info()
2318 dev_err(dev, "could not get property ctrl-clock-ena-reg\n"); in hisi_sas_get_fw_info()
2319 return -ENOENT; in hisi_sas_get_fw_info()
2327 hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000; in hisi_sas_get_fw_info()
2329 if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) { in hisi_sas_get_fw_info()
2330 dev_err(dev, "could not get property phy-count\n"); in hisi_sas_get_fw_info()
2331 return -ENOENT; in hisi_sas_get_fw_info()
2334 if (device_property_read_u32(dev, "queue-count", in hisi_sas_get_fw_info()
2335 &hisi_hba->queue_count)) { in hisi_sas_get_fw_info()
2336 dev_err(dev, "could not get property queue-count\n"); in hisi_sas_get_fw_info()
2337 return -ENOENT; in hisi_sas_get_fw_info()
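/*
 * Firmware/DT properties: the SAS address, phy count and queue count are
 * mandatory; the syscon regmap and the reset/clock-enable register
 * offsets are only looked up for platform (OF) devices, and a missing
 * required property fails the probe with -ENOENT.
 */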
2350 struct device *dev = &pdev->dev; in hisi_sas_shost_alloc()
2353 shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba)); in hisi_sas_shost_alloc()
2360 INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler); in hisi_sas_shost_alloc()
2361 hisi_hba->hw = hw; in hisi_sas_shost_alloc()
2362 hisi_hba->dev = dev; in hisi_sas_shost_alloc()
2363 hisi_hba->platform_dev = pdev; in hisi_sas_shost_alloc()
2364 hisi_hba->shost = shost; in hisi_sas_shost_alloc()
2365 SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; in hisi_sas_shost_alloc()
2367 timer_setup(&hisi_hba->timer, NULL, 0); in hisi_sas_shost_alloc()
2378 hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); in hisi_sas_shost_alloc()
2379 if (IS_ERR(hisi_hba->regs)) in hisi_sas_shost_alloc()
2384 hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res); in hisi_sas_shost_alloc()
2385 if (IS_ERR(hisi_hba->sgpio_regs)) in hisi_sas_shost_alloc()
2403 if (hisi_hba->hw->interrupt_preinit) in hisi_sas_interrupt_preinit()
2404 return hisi_hba->hw->interrupt_preinit(hisi_hba); in hisi_sas_interrupt_preinit()
2413 struct device *dev = &pdev->dev; in hisi_sas_probe()
2421 return -ENOMEM; in hisi_sas_probe()
2427 phy_nr = port_nr = hisi_hba->n_phy; in hisi_sas_probe()
2432 rc = -ENOMEM; in hisi_sas_probe()
2436 sha->sas_phy = arr_phy; in hisi_sas_probe()
2437 sha->sas_port = arr_port; in hisi_sas_probe()
2438 sha->lldd_ha = hisi_hba; in hisi_sas_probe()
2440 shost->transportt = hisi_sas_stt; in hisi_sas_probe()
2441 shost->max_id = HISI_SAS_MAX_DEVICES; in hisi_sas_probe()
2442 shost->max_lun = ~0; in hisi_sas_probe()
2443 shost->max_channel = 1; in hisi_sas_probe()
2444 shost->max_cmd_len = 16; in hisi_sas_probe()
2445 if (hisi_hba->hw->slot_index_alloc) { in hisi_sas_probe()
2446 shost->can_queue = HISI_SAS_MAX_COMMANDS; in hisi_sas_probe()
2447 shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; in hisi_sas_probe()
2449 shost->can_queue = HISI_SAS_UNRESERVED_IPTT; in hisi_sas_probe()
2450 shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; in hisi_sas_probe()
2453 sha->sas_ha_name = DRV_NAME; in hisi_sas_probe()
2454 sha->dev = hisi_hba->dev; in hisi_sas_probe()
2455 sha->lldd_module = THIS_MODULE; in hisi_sas_probe()
2456 sha->sas_addr = &hisi_hba->sas_addr[0]; in hisi_sas_probe()
2457 sha->num_phys = hisi_hba->n_phy; in hisi_sas_probe()
2458 sha->core.shost = hisi_hba->shost; in hisi_sas_probe()
2460 for (i = 0; i < hisi_hba->n_phy; i++) { in hisi_sas_probe()
2461 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; in hisi_sas_probe()
2462 sha->sas_port[i] = &hisi_hba->port[i].sas_port; in hisi_sas_probe()
2469 rc = scsi_add_host(shost, &pdev->dev); in hisi_sas_probe()
2477 rc = hisi_hba->hw->hw_init(hisi_hba); in hisi_sas_probe()
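/*
 * hisi_sas_probe(): allocates the Scsi_Host and hisi_hba, fills the
 * libsas phy/port arrays and host limits (can_queue depends on whether
 * the hw layer provides its own slot_index_alloc), registers the host,
 * runs hw->hw_init() and then lets the libsas/SCSI scan discover devices.
 */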
2499 struct hisi_hba *hisi_hba = sha->lldd_ha; in hisi_sas_remove()
2500 struct Scsi_Host *shost = sha->core.shost; in hisi_sas_remove()
2502 del_timer_sync(&hisi_hba->timer); in hisi_sas_remove()
2505 sas_remove_host(sha->core.shost); in hisi_sas_remove()
2539 return -ENOMEM; in hisi_sas_init()