Lines matching references to the identifier 'qc' (struct ata_queued_cmd) in drivers/ata/libata-sff.c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where qc is a function parameter.
532 unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, unsigned char *buf, in ata_sff_data_xfer() argument
535 struct ata_port *ap = qc->dev->link->ap; in ata_sff_data_xfer()
587 unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf, in ata_sff_data_xfer32() argument
590 struct ata_device *dev = qc->dev; in ata_sff_data_xfer32()
597 return ata_sff_data_xfer(qc, buf, buflen, rw); in ata_sff_data_xfer32()
634 static void ata_pio_xfer(struct ata_queued_cmd *qc, struct page *page, in ata_pio_xfer() argument
637 bool do_write = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_pio_xfer()
641 qc->ap->ops->sff_data_xfer(qc, buf + offset, xfer_size, do_write); in ata_pio_xfer()
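
A hedged sketch of the ata_pio_xfer() call pattern above: derive the direction from the taskfile flags and hand (buffer + offset, length, rw) to the port's sff_data_xfer hook. The types below are simplified stand-ins, not the kernel's definitions.

#include <stdbool.h>

struct fake_qc;
typedef unsigned int (*xfer_fn)(struct fake_qc *qc, unsigned char *buf,
                                unsigned int buflen, int rw);

struct fake_qc {
    unsigned long tf_flags;   /* models qc->tf.flags */
    xfer_fn data_xfer;        /* models qc->ap->ops->sff_data_xfer */
};

#define FAKE_TFLAG_WRITE 0x1  /* stand-in for ATA_TFLAG_WRITE */

static void fake_pio_xfer(struct fake_qc *qc, unsigned char *page_buf,
                          unsigned int offset, unsigned int xfer_size)
{
    bool do_write = qc->tf_flags & FAKE_TFLAG_WRITE;

    /* one chunk through the controller-specific data transfer hook */
    qc->data_xfer(qc, page_buf + offset, xfer_size, do_write);
}
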
657 static void ata_pio_sector(struct ata_queued_cmd *qc) in ata_pio_sector() argument
659 struct ata_port *ap = qc->ap; in ata_pio_sector()
663 if (!qc->cursg) { in ata_pio_sector()
664 qc->curbytes = qc->nbytes; in ata_pio_sector()
667 if (qc->curbytes == qc->nbytes - qc->sect_size) in ata_pio_sector()
670 page = sg_page(qc->cursg); in ata_pio_sector()
671 offset = qc->cursg->offset + qc->cursg_ofs; in ata_pio_sector()
677 trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size); in ata_pio_sector()
684 if (offset + qc->sect_size > PAGE_SIZE) { in ata_pio_sector()
687 ata_pio_xfer(qc, page, offset, split_len); in ata_pio_sector()
688 ata_pio_xfer(qc, nth_page(page, 1), 0, in ata_pio_sector()
689 qc->sect_size - split_len); in ata_pio_sector()
691 ata_pio_xfer(qc, page, offset, qc->sect_size); in ata_pio_sector()
694 qc->curbytes += qc->sect_size; in ata_pio_sector()
695 qc->cursg_ofs += qc->sect_size; in ata_pio_sector()
697 if (qc->cursg_ofs == qc->cursg->length) { in ata_pio_sector()
698 qc->cursg = sg_next(qc->cursg); in ata_pio_sector()
699 if (!qc->cursg) in ata_pio_sector()
701 qc->cursg_ofs = 0; in ata_pio_sector()
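
The block at lines 684-691 handles a sector that straddles a page: transfer up to the page end, then the remainder from the start of the next page. A minimal standalone model (assuming 4 KiB pages and 512-byte sectors; all names are hypothetical):

#include <stdio.h>

#define FAKE_PAGE_SIZE 4096u
#define FAKE_SECT_SIZE  512u

static void fake_xfer(unsigned int page, unsigned int off, unsigned int len)
{
    printf("xfer: page %u off %u len %u\n", page, off, len);
}

static void fake_pio_sector(unsigned int page, unsigned int offset)
{
    if (offset + FAKE_SECT_SIZE > FAKE_PAGE_SIZE) {
        /* sector crosses the page boundary: split into two transfers */
        unsigned int split_len = FAKE_PAGE_SIZE - offset;

        fake_xfer(page, offset, split_len);
        fake_xfer(page + 1, 0, FAKE_SECT_SIZE - split_len);
    } else {
        fake_xfer(page, offset, FAKE_SECT_SIZE);
    }
}

int main(void)
{
    fake_pio_sector(0, 3840);  /* 3840 + 512 > 4096: splits into 256 + 256 */
    return 0;
}

The bookkeeping at lines 694-701 then advances curbytes/cursg_ofs and steps to the next scatterlist entry once the current one is consumed.
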
715 static void ata_pio_sectors(struct ata_queued_cmd *qc) in ata_pio_sectors() argument
717 if (is_multi_taskfile(&qc->tf)) { in ata_pio_sectors()
721 WARN_ON_ONCE(qc->dev->multi_count == 0); in ata_pio_sectors()
723 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, in ata_pio_sectors()
724 qc->dev->multi_count); in ata_pio_sectors()
726 ata_pio_sector(qc); in ata_pio_sectors()
728 ata_pio_sector(qc); in ata_pio_sectors()
730 ata_sff_sync(qc->ap); /* flush */ in ata_pio_sectors()
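
ata_pio_sectors() sends one sector per DRQ assertion, or up to dev->multi_count sectors for READ/WRITE MULTIPLE taskfiles, then flushes with ata_sff_sync(). A hedged model of the chunk-size computation at lines 723-724 (stand-in parameters, not the kernel API):

static unsigned int fake_drq_block_sectors(unsigned int nbytes,
                                           unsigned int curbytes,
                                           unsigned int sect_size,
                                           unsigned int multi_count,
                                           int is_multi)
{
    unsigned int remaining;

    if (!is_multi)
        return 1;                 /* plain PIO: one sector per DRQ */

    remaining = (nbytes - curbytes) / sect_size;
    return remaining < multi_count ? remaining : multi_count;
}
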
744 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc) in atapi_send_cdb() argument
747 trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len); in atapi_send_cdb()
748 WARN_ON_ONCE(qc->dev->cdb_len < 12); in atapi_send_cdb()
750 ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1); in atapi_send_cdb()
754 switch (qc->tf.protocol) { in atapi_send_cdb()
765 trace_ata_bmdma_start(ap, &qc->tf, qc->tag); in atapi_send_cdb()
766 ap->ops->bmdma_start(qc); in atapi_send_cdb()
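
Per lines 750-766, the ATAPI CDB always goes out through the PIO data-xfer hook; only the DMA protocol then kicks the bus-master engine, and only after the packet is out. A simplified model (the enum and callbacks are stand-ins):

enum fake_atapi_proto { FAKE_ATAPI_PIO, FAKE_ATAPI_NODATA, FAKE_ATAPI_DMA };

struct fake_atapi_port {
    void (*send_cdb)(const unsigned char *cdb, unsigned int len); /* models sff_data_xfer */
    void (*bmdma_start)(void);                                    /* models ops->bmdma_start */
};

static void fake_atapi_send_cdb(struct fake_atapi_port *ap,
                                const unsigned char *cdb, unsigned int cdb_len,
                                enum fake_atapi_proto proto)
{
    /* the original's WARN_ON_ONCE(cdb_len < 12): packets are >= 12 bytes */
    ap->send_cdb(cdb, cdb_len);

    if (proto == FAKE_ATAPI_DMA)
        ap->bmdma_start();  /* data phase is owned by the DMA engine */
}
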
785 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) in __atapi_pio_bytes() argument
787 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ; in __atapi_pio_bytes()
788 struct ata_port *ap = qc->ap; in __atapi_pio_bytes()
789 struct ata_device *dev = qc->dev; in __atapi_pio_bytes()
797 sg = qc->cursg; in __atapi_pio_bytes()
801 qc->nbytes, qc->curbytes, bytes); in __atapi_pio_bytes()
806 offset = sg->offset + qc->cursg_ofs; in __atapi_pio_bytes()
813 count = min(sg->length - qc->cursg_ofs, bytes); in __atapi_pio_bytes()
818 trace_atapi_pio_transfer_data(qc, offset, count); in __atapi_pio_bytes()
822 consumed = ap->ops->sff_data_xfer(qc, buf + offset, count, rw); in __atapi_pio_bytes()
826 qc->curbytes += count; in __atapi_pio_bytes()
827 qc->cursg_ofs += count; in __atapi_pio_bytes()
829 if (qc->cursg_ofs == sg->length) { in __atapi_pio_bytes()
830 qc->cursg = sg_next(qc->cursg); in __atapi_pio_bytes()
831 qc->cursg_ofs = 0; in __atapi_pio_bytes()
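
The core of __atapi_pio_bytes() (lines 806-831) clamps each chunk to what remains in the current scatterlist entry, advances the cursor, and steps to the next entry once this one is drained. A standalone model with a plain array in place of the kernel scatterlist:

struct fake_sg { unsigned int length; };

static void fake_consume(const struct fake_sg *sg, unsigned int nsg,
                         unsigned int bytes)
{
    unsigned int i = 0, ofs = 0;        /* model qc->cursg / qc->cursg_ofs */

    while (bytes && i < nsg) {
        unsigned int count = sg[i].length - ofs;

        if (count > bytes)
            count = bytes;              /* min(sg->length - cursg_ofs, bytes) */

        /* ...ap->ops->sff_data_xfer(qc, buf + offset, count, rw)... */
        bytes -= count;
        ofs += count;

        if (ofs == sg[i].length) {      /* entry drained: sg_next() */
            i++;
            ofs = 0;
        }
    }
}
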
854 static void atapi_pio_bytes(struct ata_queued_cmd *qc) in atapi_pio_bytes() argument
856 struct ata_port *ap = qc->ap; in atapi_pio_bytes()
857 struct ata_device *dev = qc->dev; in atapi_pio_bytes()
860 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0; in atapi_pio_bytes()
868 ap->ops->sff_tf_read(ap, &qc->result_tf); in atapi_pio_bytes()
869 ireason = qc->result_tf.nsect; in atapi_pio_bytes()
870 bc_lo = qc->result_tf.lbam; in atapi_pio_bytes()
871 bc_hi = qc->result_tf.lbah; in atapi_pio_bytes()
886 if (unlikely(__atapi_pio_bytes(qc, bytes))) in atapi_pio_bytes()
896 qc->err_mask |= AC_ERR_HSM; in atapi_pio_bytes()
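
Lines 868-886 decode the DRQ phase the ATAPI device is asking for: the interrupt reason lives in the sector count register, the DRQ block byte count in LBA mid/high, and a mismatch against the host's intended direction raises AC_ERR_HSM. A hedged decoder using the standard CoD/IO bits (pass host_is_writing as 0 or 1):

#define FAKE_ATAPI_COD 0x01   /* 1 = command packet phase, 0 = data */
#define FAKE_ATAPI_IO  0x02   /* 1 = device-to-host transfer */

static int fake_decode_drq(unsigned char ireason, unsigned char bc_lo,
                           unsigned char bc_hi, int host_is_writing,
                           unsigned int *bytes)
{
    if (ireason & FAKE_ATAPI_COD)
        return -1;   /* expected a data phase, device wants the packet */

    if (((ireason & FAKE_ATAPI_IO) == 0) != host_is_writing)
        return -1;   /* direction mismatch -> AC_ERR_HSM in the original */

    *bytes = bc_lo | ((unsigned int)bc_hi << 8);
    return 0;
}
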
909 struct ata_queued_cmd *qc) in ata_hsm_ok_in_wq() argument
911 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_hsm_ok_in_wq()
915 if (qc->tf.protocol == ATA_PROT_PIO && in ata_hsm_ok_in_wq()
916 (qc->tf.flags & ATA_TFLAG_WRITE)) in ata_hsm_ok_in_wq()
919 if (ata_is_atapi(qc->tf.protocol) && in ata_hsm_ok_in_wq()
920 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_hsm_ok_in_wq()
938 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) in ata_hsm_qc_complete() argument
940 struct ata_port *ap = qc->ap; in ata_hsm_qc_complete()
947 qc = ata_qc_from_tag(ap, qc->tag); in ata_hsm_qc_complete()
948 if (qc) { in ata_hsm_qc_complete()
949 if (likely(!(qc->err_mask & AC_ERR_HSM))) { in ata_hsm_qc_complete()
951 ata_qc_complete(qc); in ata_hsm_qc_complete()
956 if (likely(!(qc->err_mask & AC_ERR_HSM))) in ata_hsm_qc_complete()
957 ata_qc_complete(qc); in ata_hsm_qc_complete()
964 ata_qc_complete(qc); in ata_hsm_qc_complete()
966 ata_qc_complete(qc); in ata_hsm_qc_complete()
980 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, in ata_sff_hsm_move() argument
983 struct ata_link *link = qc->dev->link; in ata_sff_hsm_move()
989 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); in ata_sff_hsm_move()
995 WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc)); in ata_sff_hsm_move()
998 trace_ata_sff_hsm_state(qc, status); in ata_sff_hsm_move()
1008 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); in ata_sff_hsm_move()
1015 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1020 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1039 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { in ata_sff_hsm_move()
1043 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1049 if (qc->tf.protocol == ATA_PROT_PIO) { in ata_sff_hsm_move()
1059 ata_pio_sectors(qc); in ata_sff_hsm_move()
1062 atapi_send_cdb(ap, qc); in ata_sff_hsm_move()
1071 if (qc->tf.protocol == ATAPI_PROT_PIO) { in ata_sff_hsm_move()
1091 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1096 atapi_pio_bytes(qc); in ata_sff_hsm_move()
1108 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1114 if (qc->dev->horkage & in ata_sff_hsm_move()
1116 qc->err_mask |= in ata_sff_hsm_move()
1126 qc->err_mask |= AC_ERR_HSM | in ata_sff_hsm_move()
1146 qc->err_mask |= AC_ERR_DEV; in ata_sff_hsm_move()
1148 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) { in ata_sff_hsm_move()
1149 ata_pio_sectors(qc); in ata_sff_hsm_move()
1157 qc->err_mask |= AC_ERR_HSM; in ata_sff_hsm_move()
1168 qc->err_mask |= AC_ERR_NODEV_HINT; in ata_sff_hsm_move()
1178 ata_pio_sectors(qc); in ata_sff_hsm_move()
1181 (!(qc->tf.flags & ATA_TFLAG_WRITE))) { in ata_sff_hsm_move()
1193 qc->err_mask |= __ac_err_mask(status); in ata_sff_hsm_move()
1199 trace_ata_sff_hsm_command_complete(qc, status); in ata_sff_hsm_move()
1201 WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM)); in ata_sff_hsm_move()
1206 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
1215 ata_hsm_qc_complete(qc, in_wq); in ata_sff_hsm_move()
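
ata_sff_hsm_move() advances the SFF host state machine one step per call, driven by the Status register, whether invoked from the IRQ handler or from the polled PIO task. The sketch below is a deliberately coarse model; the real code also distinguishes ATA vs ATAPI protocols, device quirks, and workqueue context, and the states and masks here are stand-ins:

enum fake_hsm_state { HSM_FIRST, HSM_DATA, HSM_LAST, HSM_ERR };

#define FAKE_BSY 0x80
#define FAKE_DRQ 0x08
#define FAKE_ERR 0x01

static enum fake_hsm_state fake_hsm_step(enum fake_hsm_state st,
                                         unsigned char status,
                                         unsigned int *err_mask)
{
    switch (st) {
    case HSM_FIRST:
        /* device must assert DRQ (with BSY clear) before the host may
         * send the first data block or the ATAPI CDB */
        if ((status & (FAKE_BSY | FAKE_DRQ)) != FAKE_DRQ) {
            *err_mask |= 0x1;           /* models AC_ERR_HSM */
            return HSM_ERR;
        }
        /* ...ata_pio_sectors() or atapi_send_cdb()... */
        return HSM_DATA;

    case HSM_DATA:
        if (status & FAKE_ERR) {
            *err_mask |= 0x2;           /* models AC_ERR_DEV */
            return HSM_ERR;
        }
        if (!(status & FAKE_DRQ))
            return HSM_LAST;            /* device stopped requesting data */
        /* ...transfer the next DRQ block... */
        return HSM_DATA;

    case HSM_LAST:
        /* fold final status into err_mask, then ata_hsm_qc_complete() */
    default:
        return st;
    }
}
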
1280 struct ata_queued_cmd *qc; in ata_sff_pio_task() local
1288 qc = ata_qc_from_tag(ap, link->active_tag); in ata_sff_pio_task()
1289 if (!qc) { in ata_sff_pio_task()
1323 poll_next = ata_sff_hsm_move(ap, qc, status, 1); in ata_sff_pio_task()
1347 unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) in ata_sff_qc_issue() argument
1349 struct ata_port *ap = qc->ap; in ata_sff_qc_issue()
1350 struct ata_link *link = qc->dev->link; in ata_sff_qc_issue()
1356 qc->tf.flags |= ATA_TFLAG_POLLING; in ata_sff_qc_issue()
1359 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_sff_qc_issue()
1362 switch (qc->tf.protocol) { in ata_sff_qc_issue()
1364 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1365 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1367 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1370 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1376 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1377 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1379 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1381 if (qc->tf.flags & ATA_TFLAG_WRITE) { in ata_sff_qc_issue()
1393 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1406 if (qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_qc_issue()
1407 ata_qc_set_polling(qc); in ata_sff_qc_issue()
1409 ata_tf_to_host(ap, &qc->tf, qc->tag); in ata_sff_qc_issue()
1414 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || in ata_sff_qc_issue()
1415 (qc->tf.flags & ATA_TFLAG_POLLING)) in ata_sff_qc_issue()
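
The issue path at lines 1347-1415 writes the taskfile to the device and then decides who drives the rest: the interrupt handler, or the polled PIO task (which also pushes the first write data block and, for most ATAPI devices, the CDB). A hedged model of that dispatch, with stand-in types modeling ata_tf_to_host() and the PIO task scheduling:

enum fake_issue_proto { PROTO_NODATA, PROTO_PIO, PROTO_ATAPI };

struct fake_issue_ctx {
    int polling;                    /* models ATA_TFLAG_POLLING */
    int is_write;                   /* models ATA_TFLAG_WRITE */
    int cdb_intr;                   /* models ATA_DFLAG_CDB_INTR */
    void (*tf_to_host)(void);       /* taskfile + command to the device */
    void (*queue_pio_task)(void);   /* schedule the polled HSM task */
};

static void fake_qc_issue(struct fake_issue_ctx *c, enum fake_issue_proto p)
{
    c->tf_to_host();

    switch (p) {
    case PROTO_NODATA:
        if (c->polling)
            c->queue_pio_task();    /* poll for command completion */
        break;
    case PROTO_PIO:
        /* writes must push the first DRQ block from task context */
        if (c->polling || c->is_write)
            c->queue_pio_task();
        break;
    case PROTO_ATAPI:
        /* send the CDB from the task unless the device interrupts
         * for it (ATA_DFLAG_CDB_INTR) */
        if (c->polling || !c->cdb_intr)
            c->queue_pio_task();
        break;
    }
}
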
1440 bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) in ata_sff_qc_fill_rtf() argument
1442 qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); in ata_sff_qc_fill_rtf()
1464 struct ata_queued_cmd *qc, in __ata_sff_port_intr() argument
1469 trace_ata_sff_port_intr(qc, hsmv_on_idle); in __ata_sff_port_intr()
1482 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in __ata_sff_port_intr()
1496 qc->err_mask |= AC_ERR_HSM; in __ata_sff_port_intr()
1506 ata_sff_hsm_move(ap, qc, status, 0); in __ata_sff_port_intr()
1524 unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_sff_port_intr() argument
1526 return __ata_sff_port_intr(ap, qc, false); in ata_sff_port_intr()
1546 struct ata_queued_cmd *qc; in __ata_sff_interrupt() local
1548 qc = ata_qc_from_tag(ap, ap->link.active_tag); in __ata_sff_interrupt()
1549 if (qc) { in __ata_sff_interrupt()
1550 if (!(qc->tf.flags & ATA_TFLAG_POLLING)) in __ata_sff_interrupt()
1551 handled |= port_intr(ap, qc); in __ata_sff_interrupt()
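
__ata_sff_interrupt() fans the shared IRQ out per port: look up the active command on each port and hand it to the port handler unless the command is being polled (lines 1548-1551). A minimal model with stand-in types:

struct fake_port_state { int has_active_qc; int polling; };

static unsigned int fake_sff_interrupt(const struct fake_port_state *ports,
                                       int nports,
                                       unsigned int (*port_intr)(int port))
{
    unsigned int handled = 0;
    int i;

    for (i = 0; i < nports; i++) {
        if (ports[i].has_active_qc && !ports[i].polling)
            handled |= port_intr(i);  /* models ata_sff_port_intr() */
    }
    return handled;
}
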
1638 struct ata_queued_cmd *qc; in ata_sff_lost_interrupt() local
1641 qc = ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_lost_interrupt()
1643 if (!qc || qc->tf.flags & ATA_TFLAG_POLLING) in ata_sff_lost_interrupt()
1657 ata_sff_port_intr(ap, qc); in ata_sff_lost_interrupt()
2091 void ata_sff_drain_fifo(struct ata_queued_cmd *qc) in ata_sff_drain_fifo() argument
2097 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE) in ata_sff_drain_fifo()
2100 ap = qc->ap; in ata_sff_drain_fifo()
2128 struct ata_queued_cmd *qc; in ata_sff_error_handler() local
2131 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_sff_error_handler()
2132 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) in ata_sff_error_handler()
2133 qc = NULL; in ata_sff_error_handler()
2145 ap->ops->sff_drain_fifo(qc); in ata_sff_error_handler()
2567 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg() argument
2569 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg()
2575 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg()
2617 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc) in ata_bmdma_fill_sg_dumb() argument
2619 struct ata_port *ap = qc->ap; in ata_bmdma_fill_sg_dumb()
2625 for_each_sg(qc->sg, sg, qc->n_elem, si) { in ata_bmdma_fill_sg_dumb()
2672 enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_qc_prep() argument
2674 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_qc_prep()
2677 ata_bmdma_fill_sg(qc); in ata_bmdma_qc_prep()
2692 enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) in ata_bmdma_dumb_qc_prep() argument
2694 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in ata_bmdma_dumb_qc_prep()
2697 ata_bmdma_fill_sg_dumb(qc); in ata_bmdma_dumb_qc_prep()
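
Both qc_prep variants walk qc->sg with for_each_sg() and emit PRD (physical region descriptor) entries for the bus-master engine. A standalone sketch of the descriptor encoding, assuming the standard BMIDE rules: 32-bit bus address, 16-bit length where 0 means 64 KiB, no chunk crossing a 64 KiB boundary, and an end-of-table bit on the last entry.

#include <stdint.h>

struct fake_prd { uint32_t addr; uint32_t flags_len; };
#define FAKE_PRD_EOT 0x80000000u

static unsigned int fake_fill_prd(struct fake_prd *prd, uint64_t bus_addr,
                                  uint32_t len)
{
    unsigned int pi = 0;

    if (!len)
        return 0;

    while (len) {
        uint32_t offset = bus_addr & 0xffffu;
        uint32_t chunk = len;

        if (offset + chunk > 0x10000u)
            chunk = 0x10000u - offset;      /* stop at a 64 KiB boundary */

        prd[pi].addr = (uint32_t)bus_addr;  /* low 32 bits of bus address */
        prd[pi].flags_len = chunk & 0xffffu; /* 0 encodes a full 64 KiB */
        pi++;
        bus_addr += chunk;
        len -= chunk;
    }

    prd[pi - 1].flags_len |= FAKE_PRD_EOT;  /* mark end of table */
    return pi;
}

The "dumb" variant additionally splits any full 64 KiB chunk into two 32 KiB entries, since a length field of 0 confuses some controllers.
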
2717 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc) in ata_bmdma_qc_issue() argument
2719 struct ata_port *ap = qc->ap; in ata_bmdma_qc_issue()
2720 struct ata_link *link = qc->dev->link; in ata_bmdma_qc_issue()
2723 if (!ata_is_dma(qc->tf.protocol)) in ata_bmdma_qc_issue()
2724 return ata_sff_qc_issue(qc); in ata_bmdma_qc_issue()
2727 ata_dev_select(ap, qc->dev->devno, 1, 0); in ata_bmdma_qc_issue()
2730 switch (qc->tf.protocol) { in ata_bmdma_qc_issue()
2732 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2734 trace_ata_tf_load(ap, &qc->tf); in ata_bmdma_qc_issue()
2735 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2736 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2737 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2738 trace_ata_bmdma_start(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2739 ap->ops->bmdma_start(qc); /* initiate bmdma */ in ata_bmdma_qc_issue()
2744 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING); in ata_bmdma_qc_issue()
2746 trace_ata_tf_load(ap, &qc->tf); in ata_bmdma_qc_issue()
2747 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in ata_bmdma_qc_issue()
2748 trace_ata_bmdma_setup(ap, &qc->tf, qc->tag); in ata_bmdma_qc_issue()
2749 ap->ops->bmdma_setup(qc); /* set up bmdma */ in ata_bmdma_qc_issue()
2753 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) in ata_bmdma_qc_issue()
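
For ATA_PROT_DMA the issue sequence at lines 2734-2739 is strictly ordered: load the taskfile, program the engine, start it. For ATAPI_PROT_DMA (lines 2744-2749) the start is deferred until atapi_send_cdb() has pushed the packet, as seen at line 766 above. A hedged model of that ordering (the function pointers are stand-ins for the port ops):

struct fake_dma_ops {
    void (*tf_load)(void);      /* models ops->sff_tf_load */
    void (*bmdma_setup)(void);  /* program PRD address/direction, send cmd */
    void (*bmdma_start)(void);  /* set the start bit */
};

static void fake_bmdma_issue(const struct fake_dma_ops *ops, int is_atapi)
{
    ops->tf_load();
    ops->bmdma_setup();

    if (!is_atapi)
        ops->bmdma_start();     /* ATAPI starts DMA only after the CDB */
}
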
2779 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc) in ata_bmdma_port_intr() argument
2786 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_port_intr()
2796 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_port_intr()
2797 ap->ops->bmdma_stop(qc); in ata_bmdma_port_intr()
2802 qc->err_mask |= AC_ERR_HOST_BUS; in ata_bmdma_port_intr()
2807 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped); in ata_bmdma_port_intr()
2809 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol)) in ata_bmdma_port_intr()
2850 struct ata_queued_cmd *qc; in ata_bmdma_error_handler() local
2854 qc = __ata_qc_from_tag(ap, ap->link.active_tag); in ata_bmdma_error_handler()
2855 if (qc && !(qc->flags & ATA_QCFLAG_FAILED)) in ata_bmdma_error_handler()
2856 qc = NULL; in ata_bmdma_error_handler()
2861 if (qc && ata_is_dma(qc->tf.protocol)) { in ata_bmdma_error_handler()
2872 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) { in ata_bmdma_error_handler()
2873 qc->err_mask = AC_ERR_HOST_BUS; in ata_bmdma_error_handler()
2877 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_error_handler()
2878 ap->ops->bmdma_stop(qc); in ata_bmdma_error_handler()
2904 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc) in ata_bmdma_post_internal_cmd() argument
2906 struct ata_port *ap = qc->ap; in ata_bmdma_post_internal_cmd()
2909 if (ata_is_dma(qc->tf.protocol)) { in ata_bmdma_post_internal_cmd()
2911 trace_ata_bmdma_stop(ap, &qc->tf, qc->tag); in ata_bmdma_post_internal_cmd()
2912 ap->ops->bmdma_stop(qc); in ata_bmdma_post_internal_cmd()
2947 void ata_bmdma_setup(struct ata_queued_cmd *qc) in ata_bmdma_setup() argument
2949 struct ata_port *ap = qc->ap; in ata_bmdma_setup()
2950 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); in ata_bmdma_setup()
2965 ap->ops->sff_exec_command(ap, &qc->tf); in ata_bmdma_setup()
2976 void ata_bmdma_start(struct ata_queued_cmd *qc) in ata_bmdma_start() argument
2978 struct ata_port *ap = qc->ap; in ata_bmdma_start()
3013 void ata_bmdma_stop(struct ata_queued_cmd *qc) in ata_bmdma_stop() argument
3015 struct ata_port *ap = qc->ap; in ata_bmdma_stop()
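
ata_bmdma_setup()/start()/stop() are thin wrappers over the SFF-8038i bus-master register block: a command register whose bit 0 starts/stops the engine and bit 3 selects direction, plus a status register. A self-contained sketch against a fake register window; offsets and bits follow the standard BMIDE layout, but this is a model, not the kernel implementation:

#include <stdint.h>

static uint8_t bmide_regs[8];   /* stand-in for the mapped I/O window */

#define BM_CMD     0            /* command register (ATA_DMA_CMD) */
#define BM_STATUS  2            /* status register (ATA_DMA_STATUS) */
#define BM_START   0x01         /* engine start/stop (ATA_DMA_START) */
#define BM_RW      0x08         /* direction (ATA_DMA_WR): set = device->memory */

static void fake_bmdma_setup(int write_to_device)
{
    uint8_t cmd = bmide_regs[BM_CMD] & (uint8_t)~(BM_RW | BM_START);

    if (!write_to_device)
        cmd |= BM_RW;           /* a read: the engine writes to memory */
    bmide_regs[BM_CMD] = cmd;
    /* the real setup also writes the PRD table bus address at offset 4
     * and then issues the ATA command via ops->sff_exec_command() */
}

static void fake_bmdma_start(void)
{
    bmide_regs[BM_CMD] |= BM_START;
}

static void fake_bmdma_stop(void)
{
    bmide_regs[BM_CMD] &= (uint8_t)~BM_START;
    /* the real stop then pauses and reads status; the IRQ-clear path
     * writes the status bits back to acknowledge INTR/ERR */
}
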