Lines matching refs:qc in drivers/ata/sata_nv.c

315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
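These declarations are the per-command hooks that libata drives through struct ata_port_operations. A condensed sketch of how sata_nv.c wires them up (the field names are standard libata ops; the table is abridged, not the full nv_adma_ops definition):

        static struct ata_port_operations nv_adma_ops = {
                /* qc-facing hooks from the declarations above */
                .check_atapi_dma        = nv_adma_check_atapi_dma,
                .qc_prep                = nv_adma_qc_prep,
                .qc_issue               = nv_adma_qc_issue,
                .post_internal_cmd      = nv_adma_post_internal_cmd,
                /* error handler, port start/stop, etc. elided */
        };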
764 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc) in nv_adma_check_atapi_dma() argument
766 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_check_atapi_dma()
863 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_host_intr() local
876 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_host_intr()
882 return ata_bmdma_port_intr(ap, qc); in nv_host_intr()
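Stitched together, the nv_host_intr() matches form this dispatch (reconstructed from the surrounding source; the ATA_TFLAG_POLLING test keeps the interrupt path from completing a command the polling loop still owns):

        /* DEV interrupt with no active qc, or a polled command:
         * just clear the interrupt status and bail */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_sff_check_status(ap);
                return 1;
        }

        /* otherwise hand completion to the generic BMDMA handler */
        return ata_bmdma_port_intr(ap, qc);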
1094 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc) in nv_adma_post_internal_cmd() argument
1096 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_post_internal_cmd()
1099 ata_bmdma_post_internal_cmd(qc); in nv_adma_post_internal_cmd()
1312 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc, in nv_adma_fill_aprd() argument
1318 if (qc->tf.flags & ATA_TFLAG_WRITE) in nv_adma_fill_aprd()
1320 if (idx == qc->n_elem - 1) in nv_adma_fill_aprd()
1331 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb) in nv_adma_fill_sg() argument
1333 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_fill_sg()
1340 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_adma_fill_sg()
1342 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)]; in nv_adma_fill_sg()
1343 nv_adma_fill_aprd(qc, sg, si, aprd); in nv_adma_fill_sg()
1346 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag))); in nv_adma_fill_sg()
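The (si-5) arithmetic at 1342 is the spill half of a two-level scatter/gather layout: the first five APRD entries live inline in the CPB, the remainder go to a per-tag table in pp->aprd, and next_aprd chains the controller to that table. Reconstructed loop, including the si < 5 selector from the elided line 1341:

        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                aprd = (si < 5) ? &cpb->aprd[si] :
                        &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
                nv_adma_fill_aprd(qc, sg, si, aprd);
        }
        if (si > 5)     /* spilled past the inline entries: chain to the table */
                cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma +
                                        NV_ADMA_SGTBL_SZ * qc->hw_tag)));
        else
                cpb->next_aprd = cpu_to_le64(0);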
1351 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc) in nv_adma_use_reg_mode() argument
1353 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_use_reg_mode()
1358 (qc->tf.flags & ATA_TFLAG_POLLING)) in nv_adma_use_reg_mode()
1361 if (!(qc->flags & ATA_QCFLAG_DMAMAP) || in nv_adma_use_reg_mode()
1362 (qc->tf.protocol == ATA_PROT_NODATA)) in nv_adma_use_reg_mode()
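Read as a whole, nv_adma_use_reg_mode() returns 1 (use plain register/BMDMA mode) whenever the ADMA engine cannot take the command: the port is set up for ATAPI, the command is polled, it has no DMA mapping, or it is a no-data protocol. Reconstructed predicate:

        /* ADMA can only be used for non-ATAPI DMA commands,
         * or interrupt-driven no-data commands */
        if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (qc->tf.flags & ATA_TFLAG_POLLING))
                return 1;

        if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
            (qc->tf.protocol == ATA_PROT_NODATA))
                return 1;

        return 0;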
1368 static void nv_adma_qc_prep(struct ata_queued_cmd *qc) in nv_adma_qc_prep() argument
1370 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_prep()
1371 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; in nv_adma_qc_prep()
1375 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_prep()
1377 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_prep()
1378 nv_adma_register_mode(qc->ap); in nv_adma_qc_prep()
1379 ata_bmdma_qc_prep(qc); in nv_adma_qc_prep()
1389 cpb->tag = qc->hw_tag; in nv_adma_qc_prep()
1393 if (qc->tf.protocol == ATA_PROT_NCQ) in nv_adma_qc_prep()
1396 VPRINTK("qc->flags = 0x%lx\n", qc->flags); in nv_adma_qc_prep()
1398 nv_adma_tf_to_cpb(&qc->tf, cpb->tf); in nv_adma_qc_prep()
1400 if (qc->flags & ATA_QCFLAG_DMAMAP) { in nv_adma_qc_prep()
1401 nv_adma_fill_sg(qc, cpb); in nv_adma_qc_prep()
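The elided tail of nv_adma_qc_prep() publishes the CPB to the controller. The property that matters is ordering: every field, including the S/G list, is written before the valid bit, with a write barrier in between. Abridged sketch (NV_CPB_CTL_* are the driver's own flag names; the full flag assembly is omitted):

        if (qc->flags & ATA_QCFLAG_DMAMAP) {
                nv_adma_fill_sg(qc, cpb);
                ctl_flags |= NV_CPB_CTL_APRD_VALID;
        }

        /* the device must not see NV_CPB_CTL_CPB_VALID until the
         * CPB contents are completely written */
        wmb();
        cpb->ctl_flags = ctl_flags;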
1414 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) in nv_adma_qc_issue() argument
1416 struct nv_adma_port_priv *pp = qc->ap->private_data; in nv_adma_qc_issue()
1418 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ); in nv_adma_qc_issue()
1425 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ && in nv_adma_qc_issue()
1426 (qc->flags & ATA_QCFLAG_RESULT_TF))) { in nv_adma_qc_issue()
1427 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n"); in nv_adma_qc_issue()
1431 if (nv_adma_use_reg_mode(qc)) { in nv_adma_qc_issue()
1433 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags); in nv_adma_qc_issue()
1435 (qc->flags & ATA_QCFLAG_DMAMAP)); in nv_adma_qc_issue()
1436 nv_adma_register_mode(qc->ap); in nv_adma_qc_issue()
1437 return ata_bmdma_qc_issue(qc); in nv_adma_qc_issue()
1439 nv_adma_mode(qc->ap); in nv_adma_qc_issue()
1452 writew(qc->hw_tag, mmio + NV_ADMA_APPEND); in nv_adma_qc_issue()
1454 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_adma_qc_issue()
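Issue in ADMA mode is then a single doorbell: writing the tag to the APPEND register makes the engine fetch the CPB prepared above. curr_ncq from 1418 feeds a workaround delay when alternating between NCQ and non-NCQ commands. Reconstructed sketch:

        if (curr_ncq != pp->last_issue_ncq) {
                /* the hardware needs a short pause when switching
                 * between NCQ and non-NCQ commands */
                udelay(20);
                pp->last_issue_ncq = curr_ncq;
        }

        writew(qc->hw_tag, mmio + NV_ADMA_APPEND);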
1470 struct ata_queued_cmd *qc; in nv_generic_interrupt() local
1472 qc = ata_qc_from_tag(ap, ap->link.active_tag); in nv_generic_interrupt()
1473 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) { in nv_generic_interrupt()
1474 handled += ata_bmdma_port_intr(ap, qc); in nv_generic_interrupt()
1709 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc) in nv_swncq_qc_to_dq() argument
1716 dq->defer_bits |= (1 << qc->hw_tag); in nv_swncq_qc_to_dq()
1717 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag; in nv_swncq_qc_to_dq()
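The defer queue is a power-of-two ring (head/tail indices masked by ATA_MAX_QUEUE - 1) shadowed by a defer_bits bitmap for cheap membership tests. Its pop-side counterpart, nv_swncq_qc_from_dq() (called at 2156 and 2281 below), mirrors this push; reconstructed sketch:

        if (dq->head == dq->tail)       /* queue empty */
                return NULL;

        tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
        dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
        WARN_ON(!(dq->defer_bits & (1 << tag)));
        dq->defer_bits &= ~(1 << tag);

        return ata_qc_from_tag(ap, tag);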
1769 struct ata_queued_cmd qc; in __ata_bmdma_stop() local
1771 qc.ap = ap; in __ata_bmdma_stop()
1772 ata_bmdma_stop(&qc); in __ata_bmdma_stop()
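The on-stack qc in __ata_bmdma_stop() works because ata_bmdma_stop() only dereferences qc->ap; no other field is read. Equivalent minimal form:

        /* ata_bmdma_stop() only touches qc->ap, so a stack dummy suffices */
        struct ata_queued_cmd qc = { .ap = ap };
        ata_bmdma_stop(&qc);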
1975 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) in nv_swncq_qc_prep() argument
1977 if (qc->tf.protocol != ATA_PROT_NCQ) { in nv_swncq_qc_prep()
1978 ata_bmdma_qc_prep(qc); in nv_swncq_qc_prep()
1982 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) in nv_swncq_qc_prep()
1985 nv_swncq_fill_sg(qc); in nv_swncq_qc_prep()
1988 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc) in nv_swncq_fill_sg() argument
1990 struct ata_port *ap = qc->ap; in nv_swncq_fill_sg()
1996 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; in nv_swncq_fill_sg()
1999 for_each_sg(qc->sg, sg, qc->n_elem, si) { in nv_swncq_fill_sg()
2025 struct ata_queued_cmd *qc) in nv_swncq_issue_atacmd() argument
2029 if (qc == NULL) in nv_swncq_issue_atacmd()
2034 writel((1 << qc->hw_tag), pp->sactive_block); in nv_swncq_issue_atacmd()
2035 pp->last_issue_tag = qc->hw_tag; in nv_swncq_issue_atacmd()
2036 pp->dhfis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2037 pp->dmafis_bits &= ~(1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2038 pp->qc_active |= (0x1 << qc->hw_tag); in nv_swncq_issue_atacmd()
2040 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */ in nv_swncq_issue_atacmd()
2041 ap->ops->sff_exec_command(ap, &qc->tf); in nv_swncq_issue_atacmd()
2043 DPRINTK("Issued tag %u\n", qc->hw_tag); in nv_swncq_issue_atacmd()
2048 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc) in nv_swncq_qc_issue() argument
2050 struct ata_port *ap = qc->ap; in nv_swncq_qc_issue()
2053 if (qc->tf.protocol != ATA_PROT_NCQ) in nv_swncq_qc_issue()
2054 return ata_bmdma_qc_issue(qc); in nv_swncq_qc_issue()
2059 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_qc_issue()
2061 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */ in nv_swncq_qc_issue()
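The branch elided between 2054 and 2059 is the whole scheduling policy: issue immediately when no command is outstanding, otherwise park the qc on the defer queue. Reconstructed:

        if (!pp->qc_active)
                nv_swncq_issue_atacmd(ap, qc);  /* port idle: issue now */
        else
                nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */

        return 0;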
2093 struct ata_queued_cmd *qc; in nv_swncq_sdbfis() local
2149 qc = ata_qc_from_tag(ap, pp->last_issue_tag); in nv_swncq_sdbfis()
2150 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2156 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_sdbfis()
2157 WARN_ON(qc == NULL); in nv_swncq_sdbfis()
2158 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_sdbfis()
2175 struct ata_queued_cmd *qc; in nv_swncq_dmafis() local
2185 qc = ata_qc_from_tag(ap, tag); in nv_swncq_dmafis()
2187 if (unlikely(!qc)) in nv_swncq_dmafis()
2190 rw = qc->tf.flags & ATA_TFLAG_WRITE; in nv_swncq_dmafis()
2193 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, in nv_swncq_dmafis()
2208 struct ata_queued_cmd *qc; in nv_swncq_host_interrupt() local
2281 qc = nv_swncq_qc_from_dq(ap); in nv_swncq_host_interrupt()
2282 nv_swncq_issue_atacmd(ap, qc); in nv_swncq_host_interrupt()