Lines Matching +full:sha +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0-only
7 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
14 if (task->lldd_task) { in mvs_find_tag()
16 slot = task->lldd_task; in mvs_find_tag()
17 *tag = slot->slot_tag; in mvs_find_tag()
18 return 1; in mvs_find_tag()
25 void *bitmap = mvi->tags; in mvs_tag_clear()
36 void *bitmap = mvi->tags; in mvs_tag_set()
43 void *bitmap = mvi->tags; in mvs_tag_alloc()
45 index = find_first_zero_bit(bitmap, mvi->tags_num); in mvs_tag_alloc()
47 if (tag >= mvi->tags_num) in mvs_tag_alloc()
48 return -SAS_QUEUE_FULL; in mvs_tag_alloc()
57 for (i = 0; i < mvi->tags_num; ++i) in mvs_tag_init()
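
The tag helpers above treat mvi->tags as a plain bitmap: mvs_tag_alloc() takes the first clear bit and returns -SAS_QUEUE_FULL once every slot is busy, while mvs_tag_set()/mvs_tag_clear() flip individual bits. A minimal user-space sketch of the same idea follows; it uses a fixed 64-entry bitmap and plain C instead of the kernel bitmap API, so all names and sizes here are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define TAGS_NUM 64                      /* illustrative queue depth */

static uint64_t tag_bitmap;              /* one bit per command slot */

/* return the first free tag, or -1 when the queue is full */
static int tag_alloc(void)
{
    for (int i = 0; i < TAGS_NUM; i++) {
        if (!(tag_bitmap & (1ULL << i))) {
            tag_bitmap |= 1ULL << i;     /* mark the slot as in use */
            return i;
        }
    }
    return -1;                           /* queue full */
}

static void tag_free(int tag)
{
    tag_bitmap &= ~(1ULL << tag);        /* clear the bit again */
}

int main(void)
{
    int a = tag_alloc(), b = tag_alloc();

    printf("allocated tags %d and %d\n", a, b);
    tag_free(a);
    printf("after freeing %d, next alloc returns %d\n", a, tag_alloc());
    return 0;
}
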
64 struct sas_ha_struct *sha = dev->port->ha; in mvs_find_dev_mvi() local
68 while (sha->sas_port[i]) { in mvs_find_dev_mvi()
69 if (sha->sas_port[i] == dev->port) { in mvs_find_dev_mvi()
70 spin_lock(&sha->sas_port[i]->phy_list_lock); in mvs_find_dev_mvi()
71 phy = container_of(sha->sas_port[i]->phy_list.next, in mvs_find_dev_mvi()
73 spin_unlock(&sha->sas_port[i]->phy_list_lock); in mvs_find_dev_mvi()
75 while (sha->sas_phy[j]) { in mvs_find_dev_mvi()
76 if (sha->sas_phy[j] == phy) in mvs_find_dev_mvi()
84 hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; in mvs_find_dev_mvi()
85 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; in mvs_find_dev_mvi()
94 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; in mvs_find_dev_phyno()
95 struct mvs_info *mvi = mvi_dev->mvi_info; in mvs_find_dev_phyno()
96 struct sas_ha_struct *sha = dev->port->ha; in mvs_find_dev_phyno() local
98 while (sha->sas_port[i]) { in mvs_find_dev_phyno()
99 if (sha->sas_port[i] == dev->port) { in mvs_find_dev_phyno()
102 spin_lock(&sha->sas_port[i]->phy_list_lock); in mvs_find_dev_phyno()
104 &sha->sas_port[i]->phy_list, port_phy_el) { in mvs_find_dev_phyno()
106 while (sha->sas_phy[j]) { in mvs_find_dev_phyno()
107 if (sha->sas_phy[j] == phy) in mvs_find_dev_phyno()
111 phyno[n] = (j >= mvi->chip->n_phy) ? in mvs_find_dev_phyno()
112 (j - mvi->chip->n_phy) : j; in mvs_find_dev_phyno()
116 spin_unlock(&sha->sas_port[i]->phy_list_lock); in mvs_find_dev_phyno()
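
Both lookup helpers above convert a global libsas phy index into a (controller, local phy) pair: hi = j / n_phy selects the mvs_info instance, and j - n_phy (equivalently j % n_phy for two cores) gives the phy number on that chip. A small sketch of that arithmetic, with the per-core phy count assumed to be 4 purely for the demo:

#include <stdio.h>

int main(void)
{
    const int n_phy = 4;                 /* assumed phys per controller core */

    for (int j = 0; j < 8; j++)
        printf("global phy %d -> core %d, local phy %d\n",
               j, j / n_phy, j % n_phy);
    return 0;
}
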
129 if (mvi->devices[dev_no].taskfileset == MVS_ID_NOT_MAPPED) in mvs_find_dev_by_reg_set()
132 if (mvi->devices[dev_no].taskfileset == reg_set) in mvs_find_dev_by_reg_set()
133 return &mvi->devices[dev_no]; in mvs_find_dev_by_reg_set()
145 if (dev->taskfileset == MVS_ID_NOT_MAPPED) in mvs_free_reg_set()
147 MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset); in mvs_free_reg_set()
153 if (dev->taskfileset != MVS_ID_NOT_MAPPED) in mvs_assign_reg_set()
155 return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset); in mvs_assign_reg_set()
162 if (!(phy_mask & 1)) in mvs_phys_reset()
164 MVS_CHIP_DISP->phy_reset(mvi, no, hard); in mvs_phys_reset()
171 int rc = 0, phy_id = sas_phy->id; in mvs_phy_control()
173 struct sas_ha_struct *sha = sas_phy->ha; in mvs_phy_control() local
176 while (sha->sas_phy[i]) { in mvs_phy_control()
177 if (sha->sas_phy[i] == sas_phy) in mvs_phy_control()
181 hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy; in mvs_phy_control()
182 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi]; in mvs_phy_control()
186 MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata); in mvs_phy_control()
190 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id); in mvs_phy_control()
193 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_HARD_RESET); in mvs_phy_control()
197 MVS_CHIP_DISP->phy_enable(mvi, phy_id); in mvs_phy_control()
198 MVS_CHIP_DISP->phy_reset(mvi, phy_id, MVS_SOFT_RESET); in mvs_phy_control()
202 MVS_CHIP_DISP->phy_disable(mvi, phy_id); in mvs_phy_control()
206 rc = -ENOSYS; in mvs_phy_control()
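
Every hardware touch in mvs_phy_control() goes through MVS_CHIP_DISP, the per-chip table of function pointers, so the common code stays the same across the supported chip generations. The sketch below shows only the dispatch-table pattern itself; the structure and function names are invented for the illustration and are not the driver's.

#include <stdio.h>

struct chip_dispatch {                        /* stand-in for the chip ops table */
    void (*phy_reset)(int phy_id, int hard);
    void (*phy_disable)(int phy_id);
};

static void demo_phy_reset(int phy_id, int hard)
{
    printf("phy %d: %s reset\n", phy_id, hard ? "hard" : "soft");
}

static void demo_phy_disable(int phy_id)
{
    printf("phy %d: disabled\n", phy_id);
}

static const struct chip_dispatch demo_chip = {
    .phy_reset   = demo_phy_reset,
    .phy_disable = demo_phy_disable,
};

int main(void)
{
    const struct chip_dispatch *disp = &demo_chip;   /* MVS_CHIP_DISP analogue */

    disp->phy_reset(0, 1);                           /* hard-reset path */
    disp->phy_disable(0);                            /* disable path    */
    return 0;
}
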
218 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo); in mvs_set_sas_addr()
219 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo); in mvs_set_sas_addr()
220 MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi); in mvs_set_sas_addr()
221 MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi); in mvs_set_sas_addr()
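
mvs_set_sas_addr() programs a port's 64-bit SAS address as two 32-bit writes through the port-configuration window, one at off_lo and one at off_hi. Assuming the lo/hi arguments are simply the two halves of the address, the split looks like this (the address value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t sas_addr = 0x5005043011ab0001ULL;     /* made-up example address */
    uint32_t lo = (uint32_t)sas_addr;              /* written via off_lo */
    uint32_t hi = (uint32_t)(sas_addr >> 32);      /* written via off_hi */

    printf("hi = 0x%08x, lo = 0x%08x\n", (unsigned)hi, (unsigned)lo);
    return 0;
}
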
226 struct mvs_phy *phy = &mvi->phy[i]; in mvs_bytes_dmaed()
227 struct asd_sas_phy *sas_phy = &phy->sas_phy; in mvs_bytes_dmaed()
229 if (!phy->phy_attached) in mvs_bytes_dmaed()
232 if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK) in mvs_bytes_dmaed()
233 && phy->phy_type & PORT_TYPE_SAS) { in mvs_bytes_dmaed()
239 if (sas_phy->phy) { in mvs_bytes_dmaed()
240 struct sas_phy *sphy = sas_phy->phy; in mvs_bytes_dmaed()
242 sphy->negotiated_linkrate = sas_phy->linkrate; in mvs_bytes_dmaed()
243 sphy->minimum_linkrate = phy->minimum_linkrate; in mvs_bytes_dmaed()
244 sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; in mvs_bytes_dmaed()
245 sphy->maximum_linkrate = phy->maximum_linkrate; in mvs_bytes_dmaed()
246 sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate(); in mvs_bytes_dmaed()
249 if (phy->phy_type & PORT_TYPE_SAS) { in mvs_bytes_dmaed()
252 id = (struct sas_identify_frame *)phy->frame_rcvd; in mvs_bytes_dmaed()
253 id->dev_type = phy->identify.device_type; in mvs_bytes_dmaed()
254 id->initiator_bits = SAS_PROTOCOL_ALL; in mvs_bytes_dmaed()
255 id->target_bits = phy->identify.target_port_protocols; in mvs_bytes_dmaed()
258 if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { in mvs_bytes_dmaed()
259 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); in mvs_bytes_dmaed()
260 MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00); in mvs_bytes_dmaed()
262 } else if (phy->phy_type & PORT_TYPE_SATA) { in mvs_bytes_dmaed()
265 mv_dprintk("phy %d byte dmaded.\n", i + mvi->id * mvi->chip->n_phy); in mvs_bytes_dmaed()
267 sas_phy->frame_rcvd_size = phy->frame_rcvd_size; in mvs_bytes_dmaed()
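
For SAS phys, mvs_bytes_dmaed() above rewrites the received identify frame with the local view of the attached device: its device type, SAS_PROTOCOL_ALL as the initiator bits, and the attached target protocols. A simplified stand-in for that fill follows; the struct layout is invented and the values are hard-coded for the demo (0x0e corresponds to the usual SMP|STP|SSP protocol mask).

#include <stdint.h>
#include <stdio.h>

struct demo_identify {                   /* simplified identify-frame fields */
    uint8_t dev_type;
    uint8_t initiator_bits;
    uint8_t target_bits;
};

int main(void)
{
    struct demo_identify id = {
        .dev_type       = 1,             /* end device, as an example       */
        .initiator_bits = 0x0e,          /* SMP | STP | SSP                 */
        .target_bits    = 0x08,          /* e.g. an SSP target              */
    };

    printf("dev_type=%u initiator=0x%02x target=0x%02x\n",
           id.dev_type, id.initiator_bits, id.target_bits);
    return 0;
}
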
277 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); in mvs_scan_start() local
278 struct mvs_prv_info *mvs_prv = sha->lldd_ha; in mvs_scan_start()
280 core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host; in mvs_scan_start()
283 mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j]; in mvs_scan_start()
284 for (i = 0; i < mvi->chip->n_phy; ++i) in mvs_scan_start()
287 mvs_prv->scan_finished = 1; in mvs_scan_start()
292 struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); in mvs_scan_finished() local
293 struct mvs_prv_info *mvs_prv = sha->lldd_ha; in mvs_scan_finished()
295 if (mvs_prv->scan_finished == 0) in mvs_scan_finished()
298 sas_drain_work(sha); in mvs_scan_finished()
299 return 1; in mvs_scan_finished()
306 struct sas_ha_struct *sha = mvi->sas; in mvs_task_prep_smp() local
307 struct sas_task *task = tei->task; in mvs_task_prep_smp()
308 struct mvs_cmd_hdr *hdr = tei->hdr; in mvs_task_prep_smp()
309 struct domain_device *dev = task->dev; in mvs_task_prep_smp()
310 struct asd_sas_port *sas_port = dev->port; in mvs_task_prep_smp()
311 struct sas_phy *sphy = dev->phy; in mvs_task_prep_smp()
312 struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number]; in mvs_task_prep_smp()
314 u32 req_len, resp_len, tag = tei->tag; in mvs_task_prep_smp()
319 struct mvs_slot_info *slot = &mvi->slot_info[tag]; in mvs_task_prep_smp()
320 u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); in mvs_task_prep_smp()
323 * DMA-map SMP request, response buffers in mvs_task_prep_smp()
325 sg_req = &task->smp_task.smp_req; in mvs_task_prep_smp()
326 elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE); in mvs_task_prep_smp()
328 return -ENOMEM; in mvs_task_prep_smp()
331 sg_resp = &task->smp_task.smp_resp; in mvs_task_prep_smp()
332 elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE); in mvs_task_prep_smp()
334 rc = -ENOMEM; in mvs_task_prep_smp()
341 rc = -EINVAL; in mvs_task_prep_smp()
346 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs in mvs_task_prep_smp()
349 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */ in mvs_task_prep_smp()
350 buf_tmp = slot->buf; in mvs_task_prep_smp()
351 buf_tmp_dma = slot->buf_dma; in mvs_task_prep_smp()
353 hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req)); in mvs_task_prep_smp()
357 hdr->open_frame = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_smp()
364 if (tei->n_elem) in mvs_task_prep_smp()
365 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_smp()
367 hdr->prd_tbl = 0; in mvs_task_prep_smp()
369 i = MVS_CHIP_DISP->prd_size() * tei->n_elem; in mvs_task_prep_smp()
374 slot->response = buf_tmp; in mvs_task_prep_smp()
375 hdr->status_buf = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_smp()
376 if (mvi->flags & MVF_FLAG_SOC) in mvs_task_prep_smp()
377 hdr->reserved[0] = 0; in mvs_task_prep_smp()
382 slot->tx = mvi->tx_prod; in mvs_task_prep_smp()
383 mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) | in mvs_task_prep_smp()
387 hdr->flags |= flags; in mvs_task_prep_smp()
388 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4)); in mvs_task_prep_smp()
389 hdr->tags = cpu_to_le32(tag); in mvs_task_prep_smp()
390 hdr->data_len = 0; in mvs_task_prep_smp()
393 /* initiator, SMP, ftype 1h */ in mvs_task_prep_smp()
394 buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; in mvs_task_prep_smp()
395 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; in mvs_task_prep_smp()
397 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); in mvs_task_prep_smp()
400 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); in mvs_task_prep_smp()
405 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1, in mvs_task_prep_smp()
408 dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1, in mvs_task_prep_smp()
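
Two details of the SMP preparation above are worth calling out: the request and response scatterlists are each mapped with exactly one element, and the command header's lens field packs both transfer sizes in dwords, with the request shortened by one dword (mirroring the driver's (req_len - 4) / 4; the subtracted dword is commonly the frame CRC). A tiny sketch of that packing, with made-up byte counts:

#include <stdint.h>
#include <stdio.h>

/* pack SMP response/request sizes (in dwords) into one 32-bit field,
 * following the hdr->lens computation shown above */
static uint32_t pack_smp_lens(uint32_t req_len, uint32_t resp_len)
{
    return ((resp_len / 4) << 16) | ((req_len - 4) / 4);
}

int main(void)
{
    uint32_t lens = pack_smp_lens(40, 60);       /* example byte counts */

    printf("lens = 0x%08x (resp dwords %u, req dwords %u)\n",
           (unsigned)lens, (unsigned)(lens >> 16), (unsigned)(lens & 0xffff));
    return 0;
}
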
415 struct ata_queued_cmd *qc = task->uldd_task; in mvs_get_ncq_tag()
418 if (qc->tf.command == ATA_CMD_FPDMA_WRITE || in mvs_get_ncq_tag()
419 qc->tf.command == ATA_CMD_FPDMA_READ || in mvs_get_ncq_tag()
420 qc->tf.command == ATA_CMD_FPDMA_RECV || in mvs_get_ncq_tag()
421 qc->tf.command == ATA_CMD_FPDMA_SEND || in mvs_get_ncq_tag()
422 qc->tf.command == ATA_CMD_NCQ_NON_DATA) { in mvs_get_ncq_tag()
423 *tag = qc->tag; in mvs_get_ncq_tag()
424 return 1; in mvs_get_ncq_tag()
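
mvs_get_ncq_tag() only reports a tag for the FPDMA/NCQ command set; mvs_task_prep_ata() below then folds that 5-bit queue tag into bits 7:3 of the FIS sector count field (the sector_count |= hdr_tag << 3 line). The shift is easy to see in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* NCQ queue tags are 0..31; the FIS carries them in count bits 7:3 */
    for (unsigned int tag = 0; tag < 32; tag += 31) {
        uint8_t sector_count = (uint8_t)(tag << 3);

        printf("NCQ tag %2u -> sector_count 0x%02x\n", tag, sector_count);
    }
    return 0;
}
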
434 struct sas_task *task = tei->task; in mvs_task_prep_ata()
435 struct domain_device *dev = task->dev; in mvs_task_prep_ata()
436 struct mvs_device *mvi_dev = dev->lldd_dev; in mvs_task_prep_ata()
437 struct mvs_cmd_hdr *hdr = tei->hdr; in mvs_task_prep_ata()
438 struct asd_sas_port *sas_port = dev->port; in mvs_task_prep_ata()
441 u32 tag = tei->tag, hdr_tag; in mvs_task_prep_ata()
451 mvi_dev->device_id); in mvs_task_prep_ata()
452 return -EBUSY; in mvs_task_prep_ata()
454 slot = &mvi->slot_info[tag]; in mvs_task_prep_ata()
455 slot->tx = mvi->tx_prod; in mvs_task_prep_ata()
458 ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) | in mvs_task_prep_ata()
459 (mvi_dev->taskfileset << TXQ_SRS_SHIFT); in mvs_task_prep_ata()
460 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q); in mvs_task_prep_ata()
462 if (task->data_dir == DMA_FROM_DEVICE) in mvs_task_prep_ata()
463 flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT); in mvs_task_prep_ata()
465 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT); in mvs_task_prep_ata()
467 if (task->ata_task.use_ncq) in mvs_task_prep_ata()
469 if (dev->sata_dev.class == ATA_DEV_ATAPI) { in mvs_task_prep_ata()
470 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) in mvs_task_prep_ata()
474 hdr->flags = cpu_to_le32(flags); in mvs_task_prep_ata()
476 if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag)) in mvs_task_prep_ata()
477 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); in mvs_task_prep_ata()
481 hdr->tags = cpu_to_le32(hdr_tag); in mvs_task_prep_ata()
483 hdr->data_len = cpu_to_le32(task->total_xfer_len); in mvs_task_prep_ata()
486 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs in mvs_task_prep_ata()
489 /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */ in mvs_task_prep_ata()
490 buf_cmd = buf_tmp = slot->buf; in mvs_task_prep_ata()
491 buf_tmp_dma = slot->buf_dma; in mvs_task_prep_ata()
493 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ata()
501 hdr->open_frame = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ata()
509 if (tei->n_elem) in mvs_task_prep_ata()
510 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ata()
512 hdr->prd_tbl = 0; in mvs_task_prep_ata()
513 i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count(); in mvs_task_prep_ata()
519 slot->response = buf_tmp; in mvs_task_prep_ata()
520 hdr->status_buf = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ata()
521 if (mvi->flags & MVF_FLAG_SOC) in mvs_task_prep_ata()
522 hdr->reserved[0] = 0; in mvs_task_prep_ata()
525 resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ - in mvs_task_prep_ata()
526 sizeof(struct mvs_err_info) - i; in mvs_task_prep_ata()
530 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); in mvs_task_prep_ata()
532 if (likely(!task->ata_task.device_control_reg_update)) in mvs_task_prep_ata()
533 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ in mvs_task_prep_ata()
535 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); in mvs_task_prep_ata()
536 if (dev->sata_dev.class == ATA_DEV_ATAPI) in mvs_task_prep_ata()
538 task->ata_task.atapi_packet, 16); in mvs_task_prep_ata()
541 /* initiator, STP, ftype 1h */ in mvs_task_prep_ata()
542 buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; in mvs_task_prep_ata()
543 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; in mvs_task_prep_ata()
544 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); in mvs_task_prep_ata()
545 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); in mvs_task_prep_ata()
548 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); in mvs_task_prep_ata()
550 if (task->data_dir == DMA_FROM_DEVICE) in mvs_task_prep_ata()
551 MVS_CHIP_DISP->dma_fix(mvi, sas_port->phy_mask, in mvs_task_prep_ata()
552 TRASH_BUCKET_SIZE, tei->n_elem, buf_prd); in mvs_task_prep_ata()
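
The ATA and SMP prep paths both carve a single per-slot DMA buffer into consecutive regions (command table, open address frame, PRD table, status/response area), advancing a CPU pointer and the matching bus address in lockstep and recording each region's bus address in the command header. The sketch below walks that carving with invented sizes; none of the constants are the driver's.

#include <stdint.h>
#include <stdio.h>

#define CMD_SZ  128                     /* invented region sizes */
#define OAF_SZ  64
#define PRD_SZ  (16 * 8)

int main(void)
{
    uint8_t  slot_buf[1024];            /* stands in for slot->buf     */
    uint64_t buf_tmp_dma = 0x10000000;  /* stands in for slot->buf_dma */
    uint8_t  *buf_tmp = slot_buf;

    printf("cmd table  @ dma 0x%llx\n", (unsigned long long)buf_tmp_dma);
    buf_tmp += CMD_SZ;  buf_tmp_dma += CMD_SZ;

    printf("open frame @ dma 0x%llx\n", (unsigned long long)buf_tmp_dma);
    buf_tmp += OAF_SZ;  buf_tmp_dma += OAF_SZ;

    printf("prd table  @ dma 0x%llx\n", (unsigned long long)buf_tmp_dma);
    buf_tmp += PRD_SZ;  buf_tmp_dma += PRD_SZ;

    printf("status buf @ dma 0x%llx (%zu bytes left for the response)\n",
           (unsigned long long)buf_tmp_dma,
           sizeof(slot_buf) - (size_t)(buf_tmp - slot_buf));
    return 0;
}
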
561 struct sas_task *task = tei->task; in mvs_task_prep_ssp()
562 struct mvs_cmd_hdr *hdr = tei->hdr; in mvs_task_prep_ssp()
563 struct mvs_port *port = tei->port; in mvs_task_prep_ssp()
564 struct domain_device *dev = task->dev; in mvs_task_prep_ssp()
565 struct mvs_device *mvi_dev = dev->lldd_dev; in mvs_task_prep_ssp()
566 struct asd_sas_port *sas_port = dev->port; in mvs_task_prep_ssp()
574 u32 resp_len, req_len, i, tag = tei->tag; in mvs_task_prep_ssp()
578 slot = &mvi->slot_info[tag]; in mvs_task_prep_ssp()
580 phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap : in mvs_task_prep_ssp()
581 sas_port->phy_mask) & TXQ_PHY_MASK; in mvs_task_prep_ssp()
583 slot->tx = mvi->tx_prod; in mvs_task_prep_ssp()
584 mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag | in mvs_task_prep_ssp()
589 if (task->ssp_task.enable_first_burst) { in mvs_task_prep_ssp()
591 fburst = (1 << 7); in mvs_task_prep_ssp()
598 hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT)); in mvs_task_prep_ssp()
599 hdr->tags = cpu_to_le32(tag); in mvs_task_prep_ssp()
600 hdr->data_len = cpu_to_le32(task->total_xfer_len); in mvs_task_prep_ssp()
603 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs in mvs_task_prep_ssp()
606 /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */ in mvs_task_prep_ssp()
607 buf_cmd = buf_tmp = slot->buf; in mvs_task_prep_ssp()
608 buf_tmp_dma = slot->buf_dma; in mvs_task_prep_ssp()
610 hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ssp()
617 hdr->open_frame = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ssp()
624 if (tei->n_elem) in mvs_task_prep_ssp()
625 hdr->prd_tbl = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ssp()
627 hdr->prd_tbl = 0; in mvs_task_prep_ssp()
629 i = MVS_CHIP_DISP->prd_size() * tei->n_elem; in mvs_task_prep_ssp()
634 slot->response = buf_tmp; in mvs_task_prep_ssp()
635 hdr->status_buf = cpu_to_le64(buf_tmp_dma); in mvs_task_prep_ssp()
636 if (mvi->flags & MVF_FLAG_SOC) in mvs_task_prep_ssp()
637 hdr->reserved[0] = 0; in mvs_task_prep_ssp()
639 resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ - in mvs_task_prep_ssp()
640 sizeof(struct mvs_err_info) - i; in mvs_task_prep_ssp()
646 hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4)); in mvs_task_prep_ssp()
649 /* initiator, SSP, ftype 1h */ in mvs_task_prep_ssp()
650 buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; in mvs_task_prep_ssp()
651 buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; in mvs_task_prep_ssp()
652 *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); in mvs_task_prep_ssp()
653 memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); in mvs_task_prep_ssp()
659 ssp_hdr->frame_type = SSP_TASK; in mvs_task_prep_ssp()
661 ssp_hdr->frame_type = SSP_COMMAND; in mvs_task_prep_ssp()
663 memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr, in mvs_task_prep_ssp()
665 memcpy(ssp_hdr->hashed_src_addr, in mvs_task_prep_ssp()
666 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); in mvs_task_prep_ssp()
667 ssp_hdr->tag = cpu_to_be16(tag); in mvs_task_prep_ssp()
671 memcpy(buf_cmd, &task->ssp_task.LUN, 8); in mvs_task_prep_ssp()
673 if (ssp_hdr->frame_type != SSP_TASK) { in mvs_task_prep_ssp()
674 buf_cmd[9] = fburst | task->ssp_task.task_attr | in mvs_task_prep_ssp()
675 (task->ssp_task.task_prio << 3); in mvs_task_prep_ssp()
676 memcpy(buf_cmd + 12, task->ssp_task.cmd->cmnd, in mvs_task_prep_ssp()
677 task->ssp_task.cmd->cmd_len); in mvs_task_prep_ssp()
679 buf_cmd[10] = tmf->tmf; in mvs_task_prep_ssp()
680 switch (tmf->tmf) { in mvs_task_prep_ssp()
684 (tmf->tag_of_task_to_be_managed >> 8) & 0xff; in mvs_task_prep_ssp()
686 tmf->tag_of_task_to_be_managed & 0xff; in mvs_task_prep_ssp()
693 MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd); in mvs_task_prep_ssp()
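
All three prep routines build the same 12-byte open address frame by hand: byte 0 packs the initiator bit, the protocol and the frame type (the "initiator, SSP, ftype 1h" comment), byte 1 carries the link-rate code, bytes 2-3 hold the big-endian connection tag (device_id + 1), and bytes 4-11 the destination SAS address. A stand-alone sketch of that layout; the protocol code, rate code, tag and address below are example values only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    uint8_t  buf_oaf[12];
    uint8_t  proto    = 1;               /* example protocol code   */
    uint8_t  linkrate = 0x9;             /* example link-rate code  */
    uint16_t conn_tag = 0x0001;          /* device_id + 1           */
    uint8_t  sas_addr[8] = { 0x50, 0x05, 0x04, 0x30, 0x11, 0xab, 0x00, 0x01 };

    buf_oaf[0] = (uint8_t)((1 << 7) | (proto << 4) | 0x1);  /* initiator, proto, ftype 1h */
    buf_oaf[1] = linkrate & 0xf;
    buf_oaf[2] = (uint8_t)(conn_tag >> 8);                  /* cpu_to_be16() by hand */
    buf_oaf[3] = (uint8_t)conn_tag;
    memcpy(buf_oaf + 4, sas_addr, sizeof(sas_addr));

    for (size_t i = 0; i < sizeof(buf_oaf); i++)
        printf("%02x ", buf_oaf[i]);
    printf("\n");
    return 0;
}
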
697 #define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED)))
701 struct domain_device *dev = task->dev; in mvs_task_prep()
702 struct mvs_device *mvi_dev = dev->lldd_dev; in mvs_task_prep()
708 if (!dev->port) { in mvs_task_prep()
709 struct task_status_struct *tsm = &task->task_status; in mvs_task_prep()
711 tsm->resp = SAS_TASK_UNDELIVERED; in mvs_task_prep()
712 tsm->stat = SAS_PHY_DOWN; in mvs_task_prep()
714 * libsas will use dev->port, should in mvs_task_prep()
717 if (dev->dev_type != SAS_SATA_DEV) in mvs_task_prep()
718 task->task_done(task); in mvs_task_prep()
725 mvi_dev->device_id); in mvs_task_prep()
728 SAS_ADDR(dev->sas_addr)); in mvs_task_prep()
733 tei.port = dev->port->lldd_port; in mvs_task_prep()
734 if (tei.port && !tei.port->port_attached && !tmf) { in mvs_task_prep()
735 if (sas_protocol_ata(task->task_proto)) { in mvs_task_prep()
736 struct task_status_struct *ts = &task->task_status; in mvs_task_prep()
738 "device.\n", dev->port->id); in mvs_task_prep()
739 ts->resp = SAS_TASK_COMPLETE; in mvs_task_prep()
740 ts->stat = SAS_PHY_DOWN; in mvs_task_prep()
742 task->task_done(task); in mvs_task_prep()
745 struct task_status_struct *ts = &task->task_status; in mvs_task_prep()
747 "device.\n", dev->port->id); in mvs_task_prep()
748 ts->resp = SAS_TASK_UNDELIVERED; in mvs_task_prep()
749 ts->stat = SAS_PHY_DOWN; in mvs_task_prep()
750 task->task_done(task); in mvs_task_prep()
755 if (!sas_protocol_ata(task->task_proto)) { in mvs_task_prep()
756 if (task->num_scatter) { in mvs_task_prep()
757 n_elem = dma_map_sg(mvi->dev, in mvs_task_prep()
758 task->scatter, in mvs_task_prep()
759 task->num_scatter, in mvs_task_prep()
760 task->data_dir); in mvs_task_prep()
762 rc = -ENOMEM; in mvs_task_prep()
767 n_elem = task->num_scatter; in mvs_task_prep()
774 slot = &mvi->slot_info[tag]; in mvs_task_prep()
776 task->lldd_task = NULL; in mvs_task_prep()
777 slot->n_elem = n_elem; in mvs_task_prep()
778 slot->slot_tag = tag; in mvs_task_prep()
780 slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma); in mvs_task_prep()
781 if (!slot->buf) { in mvs_task_prep()
782 rc = -ENOMEM; in mvs_task_prep()
787 tei.hdr = &mvi->slot[tag]; in mvs_task_prep()
790 switch (task->task_proto) { in mvs_task_prep()
803 dev_printk(KERN_ERR, mvi->dev, in mvs_task_prep()
805 task->task_proto); in mvs_task_prep()
806 rc = -EINVAL; in mvs_task_prep()
814 slot->task = task; in mvs_task_prep()
815 slot->port = tei.port; in mvs_task_prep()
816 task->lldd_task = slot; in mvs_task_prep()
817 list_add_tail(&slot->entry, &tei.port->list); in mvs_task_prep()
819 mvi_dev->running_req++; in mvs_task_prep()
821 mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1); in mvs_task_prep()
826 dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); in mvs_task_prep()
831 dev_printk(KERN_ERR, mvi->dev, "mvsas prep failed[%d]!\n", rc); in mvs_task_prep()
832 if (!sas_protocol_ata(task->task_proto)) in mvs_task_prep()
834 dma_unmap_sg(mvi->dev, task->scatter, n_elem, in mvs_task_prep()
835 task->data_dir); in mvs_task_prep()
846 struct sas_tmf_task *tmf = task->tmf; in mvs_queue_command()
847 int is_tmf = !!task->tmf; in mvs_queue_command()
849 mvi = ((struct mvs_device *)task->dev->lldd_dev)->mvi_info; in mvs_queue_command()
851 spin_lock_irqsave(&mvi->lock, flags); in mvs_queue_command()
854 dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc); in mvs_queue_command()
857 MVS_CHIP_DISP->start_delivery(mvi, (mvi->tx_prod - 1) & in mvs_queue_command()
858 (MVS_CHIP_SLOT_SZ - 1)); in mvs_queue_command()
859 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_queue_command()
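
mvs_task_prep() advances the delivery-queue producer with a power-of-two mask, (tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1), and mvs_queue_command() hands the previous slot to start_delivery with the matching (tx_prod - 1) wrap. The mask trick only works because the ring size is a power of two; a minimal demonstration with an invented ring size:

#include <stdio.h>

#define SLOT_SZ 8                       /* invented; must be a power of two */

int main(void)
{
    unsigned int tx_prod = SLOT_SZ - 2;

    for (int i = 0; i < 4; i++) {
        printf("deliver at slot %u\n", tx_prod);
        tx_prod = (tx_prod + 1) & (SLOT_SZ - 1);    /* wraps 7 -> 0 */
    }
    return 0;
}
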
875 if (!slot->task) in mvs_slot_task_free()
877 if (!sas_protocol_ata(task->task_proto)) in mvs_slot_task_free()
878 if (slot->n_elem) in mvs_slot_task_free()
879 dma_unmap_sg(mvi->dev, task->scatter, in mvs_slot_task_free()
880 slot->n_elem, task->data_dir); in mvs_slot_task_free()
882 switch (task->task_proto) { in mvs_slot_task_free()
884 dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1, in mvs_slot_task_free()
886 dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1, in mvs_slot_task_free()
898 if (slot->buf) { in mvs_slot_task_free()
899 dma_pool_free(mvi->dma_pool, slot->buf, slot->buf_dma); in mvs_slot_task_free()
900 slot->buf = NULL; in mvs_slot_task_free()
902 list_del_init(&slot->entry); in mvs_slot_task_free()
903 task->lldd_task = NULL; in mvs_slot_task_free()
904 slot->task = NULL; in mvs_slot_task_free()
905 slot->port = NULL; in mvs_slot_task_free()
906 slot->slot_tag = 0xFFFFFFFF; in mvs_slot_task_free()
912 struct mvs_phy *phy = &mvi->phy[phy_no]; in mvs_update_wideport()
913 struct mvs_port *port = phy->port; in mvs_update_wideport()
916 for_each_phy(port->wide_port_phymap, j, no) { in mvs_update_wideport()
917 if (j & 1) { in mvs_update_wideport()
918 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, in mvs_update_wideport()
920 MVS_CHIP_DISP->write_port_cfg_data(mvi, no, in mvs_update_wideport()
921 port->wide_port_phymap); in mvs_update_wideport()
923 MVS_CHIP_DISP->write_port_cfg_addr(mvi, no, in mvs_update_wideport()
925 MVS_CHIP_DISP->write_port_cfg_data(mvi, no, in mvs_update_wideport()
934 struct mvs_phy *phy = &mvi->phy[i]; in mvs_is_phy_ready()
935 struct mvs_port *port = phy->port; in mvs_is_phy_ready()
937 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i); in mvs_is_phy_ready()
938 if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) { in mvs_is_phy_ready()
940 phy->phy_attached = 1; in mvs_is_phy_ready()
945 if (phy->phy_type & PORT_TYPE_SAS) { in mvs_is_phy_ready()
946 port->wide_port_phymap &= ~(1U << i); in mvs_is_phy_ready()
947 if (!port->wide_port_phymap) in mvs_is_phy_ready()
948 port->port_attached = 0; in mvs_is_phy_ready()
950 } else if (phy->phy_type & PORT_TYPE_SATA) in mvs_is_phy_ready()
951 port->port_attached = 0; in mvs_is_phy_ready()
952 phy->port = NULL; in mvs_is_phy_ready()
953 phy->phy_attached = 0; in mvs_is_phy_ready()
954 phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA); in mvs_is_phy_ready()
966 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3); in mvs_get_d2h_reg()
967 s[3] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); in mvs_get_d2h_reg()
969 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2); in mvs_get_d2h_reg()
970 s[2] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); in mvs_get_d2h_reg()
972 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1); in mvs_get_d2h_reg()
973 s[1] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); in mvs_get_d2h_reg()
975 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0); in mvs_get_d2h_reg()
976 s[0] = cpu_to_le32(MVS_CHIP_DISP->read_port_cfg_data(mvi, i)); in mvs_get_d2h_reg()
978 if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01)) in mvs_get_d2h_reg()
979 s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10); in mvs_get_d2h_reg()
991 if (phy->timer.function) in mvs_sig_remove_timer()
992 del_timer(&phy->timer); in mvs_sig_remove_timer()
993 phy->timer.function = NULL; in mvs_sig_remove_timer()
998 struct mvs_phy *phy = &mvi->phy[i]; in mvs_update_phyinfo()
1001 id = (struct sas_identify_frame *)phy->frame_rcvd; in mvs_update_phyinfo()
1004 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i); in mvs_update_phyinfo()
1005 phy->phy_status = mvs_is_phy_ready(mvi, i); in mvs_update_phyinfo()
1008 if (phy->phy_status) { in mvs_update_phyinfo()
1010 struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy; in mvs_update_phyinfo()
1012 oob_done = MVS_CHIP_DISP->oob_done(mvi, i); in mvs_update_phyinfo()
1014 MVS_CHIP_DISP->fix_phy_info(mvi, i, id); in mvs_update_phyinfo()
1015 if (phy->phy_type & PORT_TYPE_SATA) { in mvs_update_phyinfo()
1016 phy->identify.target_port_protocols = SAS_PROTOCOL_STP; in mvs_update_phyinfo()
1017 if (mvs_is_sig_fis_received(phy->irq_status)) { in mvs_update_phyinfo()
1019 phy->phy_attached = 1; in mvs_update_phyinfo()
1020 phy->att_dev_sas_addr = in mvs_update_phyinfo()
1021 i + mvi->id * mvi->chip->n_phy; in mvs_update_phyinfo()
1023 sas_phy->oob_mode = SATA_OOB_MODE; in mvs_update_phyinfo()
1024 phy->frame_rcvd_size = in mvs_update_phyinfo()
1029 dev_printk(KERN_DEBUG, mvi->dev, in mvs_update_phyinfo()
1031 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i); in mvs_update_phyinfo()
1032 MVS_CHIP_DISP->write_port_irq_mask(mvi, i, in mvs_update_phyinfo()
1034 phy->phy_attached = 0; in mvs_update_phyinfo()
1035 phy->phy_type &= ~PORT_TYPE_SATA; in mvs_update_phyinfo()
1038 } else if (phy->phy_type & PORT_TYPE_SAS in mvs_update_phyinfo()
1039 || phy->att_dev_info & PORT_SSP_INIT_MASK) { in mvs_update_phyinfo()
1040 phy->phy_attached = 1; in mvs_update_phyinfo()
1041 phy->identify.device_type = in mvs_update_phyinfo()
1042 phy->att_dev_info & PORT_DEV_TYPE_MASK; in mvs_update_phyinfo()
1044 if (phy->identify.device_type == SAS_END_DEVICE) in mvs_update_phyinfo()
1045 phy->identify.target_port_protocols = in mvs_update_phyinfo()
1047 else if (phy->identify.device_type != SAS_PHY_UNUSED) in mvs_update_phyinfo()
1048 phy->identify.target_port_protocols = in mvs_update_phyinfo()
1051 sas_phy->oob_mode = SAS_OOB_MODE; in mvs_update_phyinfo()
1052 phy->frame_rcvd_size = in mvs_update_phyinfo()
1055 memcpy(sas_phy->attached_sas_addr, in mvs_update_phyinfo()
1056 &phy->att_dev_sas_addr, SAS_ADDR_SIZE); in mvs_update_phyinfo()
1058 if (MVS_CHIP_DISP->phy_work_around) in mvs_update_phyinfo()
1059 MVS_CHIP_DISP->phy_work_around(mvi, i); in mvs_update_phyinfo()
1062 i + mvi->id * mvi->chip->n_phy, phy->att_dev_info); in mvs_update_phyinfo()
1064 i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr); in mvs_update_phyinfo()
1067 MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status); in mvs_update_phyinfo()
1072 struct sas_ha_struct *sas_ha = sas_phy->ha; in mvs_port_notify_formed()
1074 struct mvs_phy *phy = sas_phy->lldd_phy; in mvs_port_notify_formed()
1075 struct asd_sas_port *sas_port = sas_phy->port; in mvs_port_notify_formed()
1081 while (sas_ha->sas_phy[i]) { in mvs_port_notify_formed()
1082 if (sas_ha->sas_phy[i] == sas_phy) in mvs_port_notify_formed()
1086 hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy; in mvs_port_notify_formed()
1087 mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi]; in mvs_port_notify_formed()
1088 if (i >= mvi->chip->n_phy) in mvs_port_notify_formed()
1089 port = &mvi->port[i - mvi->chip->n_phy]; in mvs_port_notify_formed()
1091 port = &mvi->port[i]; in mvs_port_notify_formed()
1093 spin_lock_irqsave(&mvi->lock, flags); in mvs_port_notify_formed()
1094 port->port_attached = 1; in mvs_port_notify_formed()
1095 phy->port = port; in mvs_port_notify_formed()
1096 sas_port->lldd_port = port; in mvs_port_notify_formed()
1097 if (phy->phy_type & PORT_TYPE_SAS) { in mvs_port_notify_formed()
1098 port->wide_port_phymap = sas_port->phy_mask; in mvs_port_notify_formed()
1099 mv_printk("set wide port phy map %x\n", sas_port->phy_mask); in mvs_port_notify_formed()
1100 mvs_update_wideport(mvi, sas_phy->id); in mvs_port_notify_formed()
1103 if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { in mvs_port_notify_formed()
1104 MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); in mvs_port_notify_formed()
1105 MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04); in mvs_port_notify_formed()
1109 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_port_notify_formed()
1115 struct mvs_phy *phy = sas_phy->lldd_phy; in mvs_port_notify_deformed()
1116 struct mvs_info *mvi = phy->mvi; in mvs_port_notify_deformed()
1117 struct asd_sas_port *port = sas_phy->port; in mvs_port_notify_deformed()
1120 while (phy != &mvi->phy[phy_no]) { in mvs_port_notify_deformed()
1125 list_for_each_entry(dev, &port->dev_list, dev_list_node) in mvs_port_notify_deformed()
1126 mvs_do_release_task(phy->mvi, phy_no, dev); in mvs_port_notify_deformed()
1133 mvs_port_notify_formed(sas_phy, 1); in mvs_port_formed()
1138 mvs_port_notify_deformed(sas_phy, 1); in mvs_port_deformed()
1145 if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { in mvs_alloc_dev()
1146 mvi->devices[dev].device_id = dev; in mvs_alloc_dev()
1147 return &mvi->devices[dev]; in mvs_alloc_dev()
1160 u32 id = mvi_dev->device_id; in mvs_free_dev()
1162 mvi_dev->device_id = id; in mvs_free_dev()
1163 mvi_dev->dev_type = SAS_PHY_UNUSED; in mvs_free_dev()
1164 mvi_dev->dev_status = MVS_DEV_NORMAL; in mvs_free_dev()
1165 mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; in mvs_free_dev()
1173 struct domain_device *parent_dev = dev->parent; in mvs_dev_found_notify()
1179 spin_lock_irqsave(&mvi->lock, flags); in mvs_dev_found_notify()
1183 res = -1; in mvs_dev_found_notify()
1186 dev->lldd_dev = mvi_device; in mvs_dev_found_notify()
1187 mvi_device->dev_status = MVS_DEV_NORMAL; in mvs_dev_found_notify()
1188 mvi_device->dev_type = dev->dev_type; in mvs_dev_found_notify()
1189 mvi_device->mvi_info = mvi; in mvs_dev_found_notify()
1190 mvi_device->sas_device = dev; in mvs_dev_found_notify()
1191 if (parent_dev && dev_is_expander(parent_dev->dev_type)) { in mvs_dev_found_notify()
1193 u8 phy_num = parent_dev->ex_dev.num_phys; in mvs_dev_found_notify()
1196 phy = &parent_dev->ex_dev.ex_phy[phy_id]; in mvs_dev_found_notify()
1197 if (SAS_ADDR(phy->attached_sas_addr) == in mvs_dev_found_notify()
1198 SAS_ADDR(dev->sas_addr)) { in mvs_dev_found_notify()
1199 mvi_device->attached_phy = phy_id; in mvs_dev_found_notify()
1207 SAS_ADDR(dev->sas_addr), in mvs_dev_found_notify()
1208 SAS_ADDR(parent_dev->sas_addr)); in mvs_dev_found_notify()
1209 res = -1; in mvs_dev_found_notify()
1215 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_dev_found_notify()
1221 return mvs_dev_found_notify(dev, 1); in mvs_dev_found()
1227 struct mvs_device *mvi_dev = dev->lldd_dev; in mvs_dev_gone_notify()
1235 mvi = mvi_dev->mvi_info; in mvs_dev_gone_notify()
1237 spin_lock_irqsave(&mvi->lock, flags); in mvs_dev_gone_notify()
1240 mvi_dev->device_id, mvi_dev->dev_type); in mvs_dev_gone_notify()
1245 dev->lldd_dev = NULL; in mvs_dev_gone_notify()
1246 mvi_dev->sas_device = NULL; in mvs_dev_gone_notify()
1248 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_dev_gone_notify()
1258 and hard reset for SSP (type 1), only for RECOVERY */
1263 int reset_type = (dev->dev_type == SAS_SATA_DEV || in mvs_debug_I_T_nexus_reset()
1264 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; in mvs_debug_I_T_nexus_reset()
1271 /* mandatory SAM-3 */
1276 struct mvs_device * mvi_dev = dev->lldd_dev; in mvs_lu_reset()
1277 struct mvs_info *mvi = mvi_dev->mvi_info; in mvs_lu_reset()
1279 mvi_dev->dev_status = MVS_DEV_EH; in mvs_lu_reset()
1282 spin_lock_irqsave(&mvi->lock, flags); in mvs_lu_reset()
1284 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_lu_reset()
1286 /* If failed, fall-through I_T_Nexus reset */ in mvs_lu_reset()
1288 mvi_dev->device_id, rc); in mvs_lu_reset()
1296 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; in mvs_I_T_nexus_reset()
1297 struct mvs_info *mvi = mvi_dev->mvi_info; in mvs_I_T_nexus_reset()
1299 if (mvi_dev->dev_status != MVS_DEV_EH) in mvs_I_T_nexus_reset()
1302 mvi_dev->dev_status = MVS_DEV_NORMAL; in mvs_I_T_nexus_reset()
1305 __func__, mvi_dev->device_id, rc); in mvs_I_T_nexus_reset()
1307 spin_lock_irqsave(&mvi->lock, flags); in mvs_I_T_nexus_reset()
1309 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_I_T_nexus_reset()
1313 /* optional SAM-3 */
1319 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { in mvs_query_task()
1320 struct domain_device *dev = task->dev; in mvs_query_task()
1321 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; in mvs_query_task()
1322 struct mvs_info *mvi = mvi_dev->mvi_info; in mvs_query_task()
1344 /* mandatory SAM-3, still need free task/slot info */
1347 struct domain_device *dev = task->dev; in mvs_abort_task()
1348 struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev; in mvs_abort_task()
1359 mvi = mvi_dev->mvi_info; in mvs_abort_task()
1361 spin_lock_irqsave(&task->task_state_lock, flags); in mvs_abort_task()
1362 if (task->task_state_flags & SAS_TASK_STATE_DONE) { in mvs_abort_task()
1363 spin_unlock_irqrestore(&task->task_state_lock, flags); in mvs_abort_task()
1367 spin_unlock_irqrestore(&task->task_state_lock, flags); in mvs_abort_task()
1368 mvi_dev->dev_status = MVS_DEV_EH; in mvs_abort_task()
1369 if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) { in mvs_abort_task()
1384 if (task->lldd_task) { in mvs_abort_task()
1385 slot = task->lldd_task; in mvs_abort_task()
1386 slot_no = (u32) (slot - mvi->slot_info); in mvs_abort_task()
1387 spin_lock_irqsave(&mvi->lock, flags); in mvs_abort_task()
1388 mvs_slot_complete(mvi, slot_no, 1); in mvs_abort_task()
1389 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_abort_task()
1393 } else if (task->task_proto & SAS_PROTOCOL_SATA || in mvs_abort_task()
1394 task->task_proto & SAS_PROTOCOL_STP) { in mvs_abort_task()
1395 if (SAS_SATA_DEV == dev->dev_type) { in mvs_abort_task()
1396 struct mvs_slot_info *slot = task->lldd_task; in mvs_abort_task()
1397 u32 slot_idx = (u32)(slot - mvi->slot_info); in mvs_abort_task()
1401 task->task_state_flags |= SAS_TASK_STATE_ABORTED; in mvs_abort_task()
1417 struct mvs_device *mvi_dev = task->dev->lldd_dev; in mvs_sata_done()
1418 struct task_status_struct *tstat = &task->task_status; in mvs_sata_done()
1419 struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf; in mvs_sata_done()
1423 resp->frame_len = sizeof(struct dev_to_host_fis); in mvs_sata_done()
1424 memcpy(&resp->ending_fis[0], in mvs_sata_done()
1425 SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset), in mvs_sata_done()
1427 tstat->buf_valid_size = sizeof(*resp); in mvs_sata_done()
1451 if (len > 1) in mvs_set_sense()
1452 buffer[1] = key; /* Sense Key */ in mvs_set_sense()
1480 iu->datapres = SAS_DATAPRES_SENSE_DATA; in mvs_fill_ssp_resp_iu()
1481 iu->response_data_len = 0; in mvs_fill_ssp_resp_iu()
1482 iu->sense_data_len = 17; in mvs_fill_ssp_resp_iu()
1483 iu->status = 02; in mvs_fill_ssp_resp_iu()
1484 mvs_set_sense(iu->sense_data, 17, 0, in mvs_fill_ssp_resp_iu()
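
mvs_fill_ssp_resp_iu() fabricates a CHECK CONDITION response IU whose sense buffer comes from mvs_set_sense(); the descriptor-format branch visible above puts the response code in byte 0 and the sense key in byte 1. A minimal sketch of building such a buffer follows; the ASC/ASCQ placement and the example codes are assumptions for the demo, not taken from the listing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void demo_set_sense(uint8_t *buf, int len, int key, int asc, int ascq)
{
    memset(buf, 0, len);
    buf[0] = 0x72;                      /* response code: descriptor format */
    if (len > 1)
        buf[1] = (uint8_t)key;          /* sense key                  */
    if (len > 2)
        buf[2] = (uint8_t)asc;          /* ASC (assumed placement)    */
    if (len > 3)
        buf[3] = (uint8_t)ascq;         /* ASCQ (assumed placement)   */
}

int main(void)
{
    uint8_t sense[17];

    demo_set_sense(sense, sizeof(sense), 0x0b, 0x47, 0x00);  /* example codes */
    for (int i = 0; i < 4; i++)
        printf("%02x ", sense[i]);
    printf("\n");
    return 0;
}
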
1491 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; in mvs_slot_err()
1493 u32 err_dw0 = le32_to_cpu(*(u32 *)slot->response); in mvs_slot_err()
1494 u32 err_dw1 = le32_to_cpu(*((u32 *)slot->response + 1)); in mvs_slot_err()
1499 MVS_CHIP_DISP->issue_stop(mvi, type, tfs); in mvs_slot_err()
1501 MVS_CHIP_DISP->command_active(mvi, slot_idx); in mvs_slot_err()
1504 switch (task->task_proto) { in mvs_slot_err()
1509 struct ssp_response_iu *iu = slot->response + in mvs_slot_err()
1512 sas_ssp_task_response(mvi->dev, task, iu); in mvs_slot_err()
1527 task->ata_task.use_ncq = 0; in mvs_slot_err()
1542 struct mvs_slot_info *slot = &mvi->slot_info[slot_idx]; in mvs_slot_complete()
1543 struct sas_task *task = slot->task; in mvs_slot_complete()
1552 if (unlikely(!task || !task->lldd_task || !task->dev)) in mvs_slot_complete()
1553 return -1; in mvs_slot_complete()
1555 tstat = &task->task_status; in mvs_slot_complete()
1556 dev = task->dev; in mvs_slot_complete()
1557 mvi_dev = dev->lldd_dev; in mvs_slot_complete()
1559 spin_lock(&task->task_state_lock); in mvs_slot_complete()
1560 task->task_state_flags &= ~SAS_TASK_STATE_PENDING; in mvs_slot_complete()
1561 task->task_state_flags |= SAS_TASK_STATE_DONE; in mvs_slot_complete()
1563 aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED; in mvs_slot_complete()
1564 spin_unlock(&task->task_state_lock); in mvs_slot_complete()
1567 tstat->resp = SAS_TASK_COMPLETE; in mvs_slot_complete()
1570 tstat->stat = SAS_ABORTED_TASK; in mvs_slot_complete()
1571 if (mvi_dev && mvi_dev->running_req) in mvs_slot_complete()
1572 mvi_dev->running_req--; in mvs_slot_complete()
1573 if (sas_protocol_ata(task->task_proto)) in mvs_slot_complete()
1577 return -1; in mvs_slot_complete()
1584 tstat->stat = SAS_PHY_DOWN; in mvs_slot_complete()
1589 * error info record present; slot->response is 32 bit aligned but may in mvs_slot_complete()
1593 && (*((u32 *)slot->response) in mvs_slot_complete()
1594 || *(((u32 *)slot->response) + 1)))) { in mvs_slot_complete()
1596 "%016llX.\n", slot->port->sas_port.id, slot_idx, in mvs_slot_complete()
1597 rx_desc, get_unaligned_le64(slot->response)); in mvs_slot_complete()
1598 tstat->stat = mvs_slot_err(mvi, task, slot_idx); in mvs_slot_complete()
1599 tstat->resp = SAS_TASK_COMPLETE; in mvs_slot_complete()
1603 switch (task->task_proto) { in mvs_slot_complete()
1607 tstat->stat = SAS_SAM_STAT_GOOD; in mvs_slot_complete()
1608 tstat->resp = SAS_TASK_COMPLETE; in mvs_slot_complete()
1612 struct ssp_response_iu *iu = slot->response + in mvs_slot_complete()
1614 sas_ssp_task_response(mvi->dev, task, iu); in mvs_slot_complete()
1616 tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; in mvs_slot_complete()
1620 struct scatterlist *sg_resp = &task->smp_task.smp_resp; in mvs_slot_complete()
1621 tstat->stat = SAS_SAM_STAT_GOOD; in mvs_slot_complete()
1623 memcpy(to + sg_resp->offset, in mvs_slot_complete()
1624 slot->response + sizeof(struct mvs_err_info), in mvs_slot_complete()
1633 tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0); in mvs_slot_complete()
1638 tstat->stat = SAS_SAM_STAT_CHECK_CONDITION; in mvs_slot_complete()
1641 if (!slot->port->port_attached) { in mvs_slot_complete()
1642 mv_dprintk("port %d has removed.\n", slot->port->sas_port.id); in mvs_slot_complete()
1643 tstat->stat = SAS_PHY_DOWN; in mvs_slot_complete()
1648 if (mvi_dev && mvi_dev->running_req) { in mvs_slot_complete()
1649 mvi_dev->running_req--; in mvs_slot_complete()
1650 if (sas_protocol_ata(task->task_proto) && !mvi_dev->running_req) in mvs_slot_complete()
1654 sts = tstat->stat; in mvs_slot_complete()
1656 spin_unlock(&mvi->lock); in mvs_slot_complete()
1657 if (task->task_done) in mvs_slot_complete()
1658 task->task_done(task); in mvs_slot_complete()
1660 spin_lock(&mvi->lock); in mvs_slot_complete()
1673 phy = &mvi->phy[phy_no]; in mvs_do_release_task()
1674 port = phy->port; in mvs_do_release_task()
1682 list_for_each_entry_safe(slot, slot2, &port->list, entry) { in mvs_do_release_task()
1684 slot_idx = (u32) (slot - mvi->slot_info); in mvs_do_release_task()
1685 task = slot->task; in mvs_do_release_task()
1687 if (dev && task->dev != dev) in mvs_do_release_task()
1691 slot_idx, slot->slot_tag, task); in mvs_do_release_task()
1692 MVS_CHIP_DISP->command_active(mvi, slot_idx); in mvs_do_release_task()
1694 mvs_slot_complete(mvi, slot_idx, 1); in mvs_do_release_task()
1709 phy->phy_attached = 0; in mvs_phy_disconnected()
1710 phy->att_dev_info = 0; in mvs_phy_disconnected()
1711 phy->att_dev_sas_addr = 0; in mvs_phy_disconnected()
1718 struct mvs_info *mvi = mwq->mvi; in mvs_work_queue()
1720 u32 phy_no = (unsigned long) mwq->data; in mvs_work_queue()
1721 struct mvs_phy *phy = &mvi->phy[phy_no]; in mvs_work_queue()
1722 struct asd_sas_phy *sas_phy = &phy->sas_phy; in mvs_work_queue()
1724 spin_lock_irqsave(&mvi->lock, flags); in mvs_work_queue()
1725 if (mwq->handler & PHY_PLUG_EVENT) { in mvs_work_queue()
1727 if (phy->phy_event & PHY_PLUG_OUT) { in mvs_work_queue()
1730 tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no); in mvs_work_queue()
1731 phy->phy_event &= ~PHY_PLUG_OUT; in mvs_work_queue()
1739 MVS_CHIP_DISP->detect_porttype(mvi, phy_no); in mvs_work_queue()
1740 mvs_update_phyinfo(mvi, phy_no, 1); in mvs_work_queue()
1746 } else if (mwq->handler & EXP_BRCT_CHG) { in mvs_work_queue()
1747 phy->phy_event &= ~EXP_BRCT_CHG; in mvs_work_queue()
1752 list_del(&mwq->entry); in mvs_work_queue()
1753 spin_unlock_irqrestore(&mvi->lock, flags); in mvs_work_queue()
1764 mwq->mvi = mvi; in mvs_handle_event()
1765 mwq->data = data; in mvs_handle_event()
1766 mwq->handler = handler; in mvs_handle_event()
1767 MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq); in mvs_handle_event()
1768 list_add_tail(&mwq->entry, &mvi->wq_list); in mvs_handle_event()
1769 schedule_delayed_work(&mwq->work_q, HZ * 2); in mvs_handle_event()
1771 ret = -ENOMEM; in mvs_handle_event()
1779 struct mvs_info *mvi = phy->mvi; in mvs_sig_time_out()
1782 for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) { in mvs_sig_time_out()
1783 if (&mvi->phy[phy_no] == phy) { in mvs_sig_time_out()
1785 phy_no+mvi->id*mvi->chip->n_phy); in mvs_sig_time_out()
1786 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_HARD_RESET); in mvs_sig_time_out()
1794 struct mvs_phy *phy = &mvi->phy[phy_no]; in mvs_int_port()
1796 phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no); in mvs_int_port()
1797 MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status); in mvs_int_port()
1798 mv_dprintk("phy %d ctrl sts=0x%08X.\n", phy_no+mvi->id*mvi->chip->n_phy, in mvs_int_port()
1799 MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no)); in mvs_int_port()
1800 mv_dprintk("phy %d irq sts = 0x%08X\n", phy_no+mvi->id*mvi->chip->n_phy, in mvs_int_port()
1801 phy->irq_status); in mvs_int_port()
1808 if (phy->irq_status & PHYEV_DCDR_ERR) { in mvs_int_port()
1810 phy_no + mvi->id*mvi->chip->n_phy); in mvs_int_port()
1813 if (phy->irq_status & PHYEV_POOF) { in mvs_int_port()
1815 if (!(phy->phy_event & PHY_PLUG_OUT)) { in mvs_int_port()
1816 int dev_sata = phy->phy_type & PORT_TYPE_SATA; in mvs_int_port()
1819 phy->phy_event |= PHY_PLUG_OUT; in mvs_int_port()
1820 MVS_CHIP_DISP->clear_srs_irq(mvi, 0, 1); in mvs_int_port()
1826 if (MVS_CHIP_DISP->stp_reset) in mvs_int_port()
1827 MVS_CHIP_DISP->stp_reset(mvi, in mvs_int_port()
1830 MVS_CHIP_DISP->phy_reset(mvi, in mvs_int_port()
1837 if (phy->irq_status & PHYEV_COMWAKE) { in mvs_int_port()
1838 tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no); in mvs_int_port()
1839 MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no, in mvs_int_port()
1841 if (phy->timer.function == NULL) { in mvs_int_port()
1842 phy->timer.function = mvs_sig_time_out; in mvs_int_port()
1843 phy->timer.expires = jiffies + 5*HZ; in mvs_int_port()
1844 add_timer(&phy->timer); in mvs_int_port()
1847 if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) { in mvs_int_port()
1848 phy->phy_status = mvs_is_phy_ready(mvi, phy_no); in mvs_int_port()
1850 if (phy->phy_status) { in mvs_int_port()
1852 MVS_CHIP_DISP->detect_porttype(mvi, phy_no); in mvs_int_port()
1853 if (phy->phy_type & PORT_TYPE_SATA) { in mvs_int_port()
1854 tmp = MVS_CHIP_DISP->read_port_irq_mask( in mvs_int_port()
1857 MVS_CHIP_DISP->write_port_irq_mask(mvi, in mvs_int_port()
1861 if (phy->phy_type & PORT_TYPE_SAS) { in mvs_int_port()
1862 MVS_CHIP_DISP->phy_reset(mvi, phy_no, MVS_PHY_TUNE); in mvs_int_port()
1868 if (phy->phy_event & PHY_PLUG_OUT) { in mvs_int_port()
1869 mvs_port_notify_formed(&phy->sas_phy, 0); in mvs_int_port()
1870 phy->phy_event &= ~PHY_PLUG_OUT; in mvs_int_port()
1874 phy_no + mvi->id*mvi->chip->n_phy); in mvs_int_port()
1876 } else if (phy->irq_status & PHYEV_BROAD_CH) { in mvs_int_port()
1878 phy_no + mvi->id*mvi->chip->n_phy); in mvs_int_port()
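
The COMWAKE handling above arms a one-shot timer only when none is pending (phy->timer.function == NULL), points it at mvs_sig_time_out() and sets the deadline to jiffies + 5*HZ; mvs_sig_remove_timer() disarms it once the signature FIS arrives. A user-space analogue of that arm-once pattern, with wall-clock time standing in for jiffies:

#include <stdio.h>
#include <time.h>

struct demo_timer {                     /* stand-in for the phy timer */
    void   (*function)(int phy_no);
    time_t expires;
};

static void sig_time_out(int phy_no)
{
    printf("phy %d: SIG FIS timed out, would issue a hard reset\n", phy_no);
}

int main(void)
{
    struct demo_timer t = { .function = NULL };

    if (t.function == NULL) {           /* arm only if not already pending */
        t.function = sig_time_out;
        t.expires  = time(NULL) + 5;    /* five seconds, like 5*HZ         */
    }

    /* the timer core would invoke the callback once 'expires' has passed */
    t.function(0);
    return 0;
}
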
1894 rx_prod_idx = mvi->rx_cons; in mvs_int_rx()
1895 mvi->rx_cons = le32_to_cpu(mvi->rx[0]); in mvs_int_rx()
1896 if (mvi->rx_cons == 0xfff) /* h/w hasn't touched RX ring yet */ in mvs_int_rx()
1903 if (unlikely(mvi->rx_cons == rx_prod_idx)) in mvs_int_rx()
1904 mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK; in mvs_int_rx()
1906 if (mvi->rx_cons == rx_prod_idx) in mvs_int_rx()
1909 while (mvi->rx_cons != rx_prod_idx) { in mvs_int_rx()
1911 rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1); in mvs_int_rx()
1912 rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]); in mvs_int_rx()
1927 MVS_CHIP_DISP->int_full(mvi); in mvs_int_rx()
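
mvs_int_rx() treats entry 0 of the RX ring as the producer index that the hardware writes back, then walks descriptors until its consumer index catches up, wrapping with (idx + 1) & (MVS_RX_RING_SZ - 1). The consumer loop below mimics that shape with a tiny invented ring; the descriptor contents are meaningless demo values.

#include <stdint.h>
#include <stdio.h>

#define RX_RING_SZ 8                    /* invented ring size */

int main(void)
{
    /* rx[0] is the hardware-updated producer index, rx[1..] are descriptors */
    uint32_t rx[RX_RING_SZ + 1] = { 3, 0, 0x100, 0x101, 0x102, 0, 0, 0, 0 };
    uint32_t cons = 0;                  /* index consumed up to */
    uint32_t prod = rx[0];

    while (cons != prod) {
        cons = (cons + 1) & (RX_RING_SZ - 1);
        printf("consume descriptor %u: 0x%x\n",
               (unsigned)cons, (unsigned)rx[cons + 1]);
    }
    return 0;
}
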
1931 int mvs_gpio_write(struct sas_ha_struct *sha, u8 reg_type, u8 reg_index, in mvs_gpio_write() argument
1934 struct mvs_prv_info *mvs_prv = sha->lldd_ha; in mvs_gpio_write()
1935 struct mvs_info *mvi = mvs_prv->mvi[0]; in mvs_gpio_write()
1937 if (MVS_CHIP_DISP->gpio_write) { in mvs_gpio_write()
1938 return MVS_CHIP_DISP->gpio_write(mvs_prv, reg_type, in mvs_gpio_write()
1942 return -ENOSYS; in mvs_gpio_write()