Lines matching refs: brq, the struct mmc_blk_request embedded in struct mmc_queue_req (drivers/mmc/core/block.c)

1245 static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,  in mmc_apply_rel_rw()  argument
1252 brq->data.blocks = 1; in mmc_apply_rel_rw()
1254 if (brq->data.blocks > card->ext_csd.rel_sectors) in mmc_apply_rel_rw()
1255 brq->data.blocks = card->ext_csd.rel_sectors; in mmc_apply_rel_rw()
1256 else if (brq->data.blocks < card->ext_csd.rel_sectors) in mmc_apply_rel_rw()
1257 brq->data.blocks = 1; in mmc_apply_rel_rw()
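
The mmc_apply_rel_rw() references above (1245-1257) clamp a reliable write to what the card advertises in ext_csd.rel_sectors. A minimal standalone model of that clamp, with made-up struct and field names rather than the real struct mmc_blk_request / struct mmc_card layout; the alignment fallback behind line 1252 is reduced to a flag here:

#include <stdio.h>

struct rel_brq { unsigned int blocks; };

/* A reliable write may not exceed rel_sectors, and anything smaller
 * collapses to a single sector, mirroring lines 1254-1257. The real
 * code derives 'aligned' from the request's start position. */
static void apply_rel_rw(struct rel_brq *brq, unsigned int rel_sectors,
			 int aligned)
{
	if (!aligned)
		brq->blocks = 1;

	if (brq->blocks > rel_sectors)
		brq->blocks = rel_sectors;
	else if (brq->blocks < rel_sectors)
		brq->blocks = 1;
}

int main(void)
{
	struct rel_brq brq = { .blocks = 13 };

	apply_rel_rw(&brq, 8, 1);
	printf("%u\n", brq.blocks);	/* 13 > 8, so clamped to 8 */
	return 0;
}
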
1273 static void mmc_blk_eval_resp_error(struct mmc_blk_request *brq) in mmc_blk_eval_resp_error() argument
1303 if (!brq->stop.error) { in mmc_blk_eval_resp_error()
1307 val = brq->stop.resp[0] & CMD_ERRORS; in mmc_blk_eval_resp_error()
1308 oor_with_open_end = val & R1_OUT_OF_RANGE && !brq->mrq.sbc; in mmc_blk_eval_resp_error()
1311 brq->stop.error = -EIO; in mmc_blk_eval_resp_error()
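
mmc_blk_eval_resp_error() (1273-1311) inspects the stop command's R1 response only when the stop command itself has not already recorded an error, and promotes error bits to -EIO unless the only bit set is OUT_OF_RANGE on an open-ended transfer (no SBC/CMD23), which is tolerated. A compact model of that decision; R1_OUT_OF_RANGE and err_mask are stand-ins for the kernel's R1_OUT_OF_RANGE and CMD_ERRORS:

#include <errno.h>

#define R1_OUT_OF_RANGE	(1u << 31)	/* stand-in for the kernel define */

int eval_stop_resp(unsigned int stop_resp0, int has_sbc, unsigned int err_mask)
{
	unsigned int val = stop_resp0 & err_mask;
	/* Open-ended reads may legitimately run past the last sector,
	 * so a lone OUT_OF_RANGE without CMD23 is not an error. */
	int oor_with_open_end = (val & R1_OUT_OF_RANGE) && !has_sbc;

	if (val && !oor_with_open_end)
		return -EIO;
	return 0;
}
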
1321 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_data_prep() local
1333 memset(brq, 0, sizeof(struct mmc_blk_request)); in mmc_blk_data_prep()
1337 brq->mrq.data = &brq->data; in mmc_blk_data_prep()
1338 brq->mrq.tag = req->tag; in mmc_blk_data_prep()
1340 brq->stop.opcode = MMC_STOP_TRANSMISSION; in mmc_blk_data_prep()
1341 brq->stop.arg = 0; in mmc_blk_data_prep()
1344 brq->data.flags = MMC_DATA_READ; in mmc_blk_data_prep()
1345 brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; in mmc_blk_data_prep()
1347 brq->data.flags = MMC_DATA_WRITE; in mmc_blk_data_prep()
1348 brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; in mmc_blk_data_prep()
1351 brq->data.blksz = 512; in mmc_blk_data_prep()
1352 brq->data.blocks = blk_rq_sectors(req); in mmc_blk_data_prep()
1353 brq->data.blk_addr = blk_rq_pos(req); in mmc_blk_data_prep()
1367 if (brq->data.blocks > card->host->max_blk_count) in mmc_blk_data_prep()
1368 brq->data.blocks = card->host->max_blk_count; in mmc_blk_data_prep()
1370 if (brq->data.blocks > 1) { in mmc_blk_data_prep()
1379 brq->data.blocks--; in mmc_blk_data_prep()
1387 brq->data.blocks = queue_physical_block_size(mq->queue) >> 9; in mmc_blk_data_prep()
1394 brq->data.blocks = card->host->ops->multi_io_quirk(card, in mmc_blk_data_prep()
1397 brq->data.blocks); in mmc_blk_data_prep()
1401 mmc_apply_rel_rw(brq, card, req); in mmc_blk_data_prep()
1402 brq->data.flags |= MMC_DATA_REL_WR; in mmc_blk_data_prep()
1412 ((brq->data.blocks * brq->data.blksz) >= in mmc_blk_data_prep()
1416 brq->data.flags |= MMC_DATA_DAT_TAG; in mmc_blk_data_prep()
1418 mmc_set_data_timeout(&brq->data, card); in mmc_blk_data_prep()
1420 brq->data.sg = mqrq->sg; in mmc_blk_data_prep()
1421 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); in mmc_blk_data_prep()
1427 if (brq->data.blocks != blk_rq_sectors(req)) { in mmc_blk_data_prep()
1428 int i, data_size = brq->data.blocks << 9; in mmc_blk_data_prep()
1431 for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { in mmc_blk_data_prep()
1439 brq->data.sg_len = i; in mmc_blk_data_prep()
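
The mmc_blk_data_prep() references (1321-1439) show the data-path setup: the block request is zeroed, brq->data gets 512-byte blocks, a block count from blk_rq_sectors() and the start address from blk_rq_pos(), the count is then capped by host->max_blk_count and host quirks, reliable-write and data-tag flags are applied, a timeout is set, and the request's scatterlist is mapped. When the block count ended up smaller than the request, lines 1427-1439 trim the scatterlist to match. A self-contained model of that final trim, using a plain array in place of struct scatterlist:

#include <stdio.h>

struct seg { int length; };

/* Shrink the segment list so it covers exactly blocks * 512 bytes,
 * mirroring the for_each_sg() loop at 1431-1439. Returns the new
 * segment count; the last kept segment is shortened to fit. */
unsigned int trim_segments(struct seg *sg, unsigned int sg_len,
			   unsigned int blocks)
{
	int data_size = blocks << 9;
	unsigned int i;

	for (i = 0; i < sg_len; i++) {
		data_size -= sg[i].length;
		if (data_size <= 0) {
			sg[i].length += data_size;
			i++;
			break;
		}
	}
	return i;
}

int main(void)
{
	struct seg sg[3] = { { 4096 }, { 4096 }, { 4096 } };
	/* 24 sectors mapped, but the transfer was cut to 9 blocks. */
	unsigned int len = trim_segments(sg, 3, 9);

	printf("sg_len=%u last=%d\n", len, sg[len - 1].length);	/* 2, 512 */
	return 0;
}
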
1454 struct mmc_request *mrq = &mqrq->brq.mrq; in mmc_blk_cqe_complete_rq()
1521 brq.mrq); in mmc_blk_cqe_req_done()
1547 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_cqe_prep_dcmd() local
1549 memset(brq, 0, sizeof(*brq)); in mmc_blk_cqe_prep_dcmd()
1551 brq->mrq.cmd = &brq->cmd; in mmc_blk_cqe_prep_dcmd()
1552 brq->mrq.tag = req->tag; in mmc_blk_cqe_prep_dcmd()
1554 return &brq->mrq; in mmc_blk_cqe_prep_dcmd()
1579 mqrq->brq.mrq.done = mmc_blk_hsq_req_done; in mmc_blk_hsq_issue_rw_rq()
1580 mmc_pre_req(host, &mqrq->brq.mrq); in mmc_blk_hsq_issue_rw_rq()
1582 err = mmc_cqe_start_req(host, &mqrq->brq.mrq); in mmc_blk_hsq_issue_rw_rq()
1584 mmc_post_req(host, &mqrq->brq.mrq, err); in mmc_blk_hsq_issue_rw_rq()
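
The mmc_blk_hsq_issue_rw_rq() lines (1579-1584) show the issue pattern: install a completion callback, DMA-prepare the request with mmc_pre_req(), start it, and immediately unprepare with mmc_post_req() if the start fails. A small model of that ordering with stub hooks standing in for the real mmc_* calls:

#include <errno.h>
#include <stdio.h>

struct req { int id; };

/* Stubs standing in for mmc_pre_req() / mmc_cqe_start_req() /
 * mmc_post_req(); only the call ordering is the point here. */
static void pre_req(struct req *r)            { printf("prep %d\n", r->id); }
static int  start_req(struct req *r)          { return r->id < 0 ? -EBUSY : 0; }
static void post_req(struct req *r, int err)  { printf("unprep %d (%d)\n", r->id, err); }

/* Prepare, submit, and undo the preparation on submit failure. */
static int issue_prepared(struct req *r)
{
	int err;

	pre_req(r);
	err = start_req(r);
	if (err)
		post_req(r, err);
	return err;
}

int main(void)
{
	struct req bad = { .id = -1 };

	return issue_prepared(&bad) ? 0 : 1;	/* submit fails, so exit 0 */
}
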
1599 return mmc_blk_cqe_start_req(mq->card->host, &mqrq->brq.mrq); in mmc_blk_cqe_issue_rw_rq()
1608 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_rw_rq_prep() local
1615 brq->mrq.cmd = &brq->cmd; in mmc_blk_rw_rq_prep()
1617 brq->cmd.arg = blk_rq_pos(req); in mmc_blk_rw_rq_prep()
1619 brq->cmd.arg <<= 9; in mmc_blk_rw_rq_prep()
1620 brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; in mmc_blk_rw_rq_prep()
1622 if (brq->data.blocks > 1 || do_rel_wr) { in mmc_blk_rw_rq_prep()
1628 brq->mrq.stop = &brq->stop; in mmc_blk_rw_rq_prep()
1632 brq->mrq.stop = NULL; in mmc_blk_rw_rq_prep()
1636 brq->cmd.opcode = rq_data_dir(req) == READ ? readcmd : writecmd; in mmc_blk_rw_rq_prep()
1656 if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && in mmc_blk_rw_rq_prep()
1659 brq->sbc.opcode = MMC_SET_BLOCK_COUNT; in mmc_blk_rw_rq_prep()
1660 brq->sbc.arg = brq->data.blocks | in mmc_blk_rw_rq_prep()
1663 brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; in mmc_blk_rw_rq_prep()
1664 brq->mrq.sbc = &brq->sbc; in mmc_blk_rw_rq_prep()
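
mmc_blk_rw_rq_prep() (1608-1664) picks the actual bus commands: the argument is the request position (shifted into a byte address for non-block-addressed cards, line 1619), multi-block or reliable transfers use the multi-block opcodes and keep a stop command wired up, single-block transfers drop the stop, and hosts/cards that support CMD23 get a SET_BLOCK_COUNT prepended with the block count (plus reliable-write/data-tag flags in its upper bits, 1659-1664). A simplified model of that selection, with hypothetical enum and struct names:

#include <stdbool.h>

enum { CMD17_READ_SINGLE = 17, CMD18_READ_MULTI = 18,
       CMD24_WRITE_SINGLE = 24, CMD25_WRITE_MULTI = 25 };

struct xfer {
	unsigned int data_cmd;	/* CMD17/18/24/25 */
	unsigned int arg;	/* sector, or byte address on old cards */
	unsigned int sbc_arg;	/* CMD23 argument when use_sbc is set */
	bool use_sbc;		/* pre-defined block count via CMD23 */
	bool use_stop;		/* keep CMD12 STOP_TRANSMISSION wired up */
};

void prep_rw(struct xfer *x, bool write, unsigned int blocks,
	     unsigned long pos, bool block_addressed, bool host_has_cmd23)
{
	bool multi = blocks > 1;

	x->arg = pos;
	if (!block_addressed)
		x->arg <<= 9;

	if (write)
		x->data_cmd = multi ? CMD25_WRITE_MULTI : CMD24_WRITE_SINGLE;
	else
		x->data_cmd = multi ? CMD18_READ_MULTI : CMD17_READ_SINGLE;

	/* The listing keeps a stop command prepared for any multi-block
	 * transfer; with CMD23 it mainly matters for error handling. */
	x->use_stop = multi;
	x->use_sbc = multi && host_has_cmd23;
	/* Reliable-write/data-tag flags would be ORed into the upper
	 * bits here, as lines 1660-1663 do. */
	x->sbc_arg = x->use_sbc ? blocks : 0;
}
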
1687 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_fix_state() local
1688 unsigned int timeout = mmc_blk_data_timeout_ms(card->host, &brq->data); in mmc_blk_fix_state()
1708 struct mmc_request *mrq = &mqrq->brq.mrq; in mmc_blk_read_single()
1759 static inline bool mmc_blk_oor_valid(struct mmc_blk_request *brq) in mmc_blk_oor_valid() argument
1761 return !!brq->mrq.sbc; in mmc_blk_oor_valid()
1764 static inline u32 mmc_blk_stop_err_bits(struct mmc_blk_request *brq) in mmc_blk_stop_err_bits() argument
1766 return mmc_blk_oor_valid(brq) ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR; in mmc_blk_stop_err_bits()
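
mmc_blk_oor_valid() and mmc_blk_stop_err_bits() (1759-1766) encode the same OUT_OF_RANGE rule as the response check above: the bit only counts as an error when the block count was pre-defined with CMD23 (mrq.sbc set); open-ended transfers may legally run off the end of the device. A tiny model; both mask values are hypothetical placeholders for the kernel's CMD_ERRORS / CMD_ERRORS_EXCL_OOR:

#include <stdbool.h>
#include <stdint.h>

#define R1_OUT_OF_RANGE		(1u << 31)	/* stand-in */
#define CMD_ERRORS_EXCL_OOR	0x00ff0000u	/* hypothetical value */
#define CMD_ERRORS		(CMD_ERRORS_EXCL_OOR | R1_OUT_OF_RANGE)

/* Pick which error bits to honour in the stop response / card status. */
uint32_t stop_err_bits(bool has_sbc)
{
	return has_sbc ? CMD_ERRORS : CMD_ERRORS_EXCL_OOR;
}
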
1776 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_status_error() local
1783 stop_err_bits = mmc_blk_stop_err_bits(brq); in mmc_blk_status_error()
1785 return brq->cmd.resp[0] & CMD_ERRORS || in mmc_blk_status_error()
1786 brq->stop.resp[0] & stop_err_bits || in mmc_blk_status_error()
1791 static inline bool mmc_blk_cmd_started(struct mmc_blk_request *brq) in mmc_blk_cmd_started() argument
1793 return !brq->sbc.error && !brq->cmd.error && in mmc_blk_cmd_started()
1794 !(brq->cmd.resp[0] & CMD_ERRORS); in mmc_blk_cmd_started()
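
mmc_blk_status_error() and mmc_blk_cmd_started() (1776-1794) are the two predicates the recovery path leans on: "did anything go wrong?" and "did the data command even start?". A condensed model over a small response snapshot; the kernel's extra check that a write has returned to the ready-for-data state is omitted here, and err_mask/stop_mask stand in for CMD_ERRORS and mmc_blk_stop_err_bits():

#include <stdbool.h>
#include <stdint.h>

struct resp_state {
	int      sbc_error, cmd_error;
	uint32_t cmd_resp0, stop_resp0;
};

/* True only if neither CMD23 nor the data command failed and the data
 * command's R1 status is clean (1791-1794). */
bool cmd_started(const struct resp_state *s, uint32_t err_mask)
{
	return !s->sbc_error && !s->cmd_error && !(s->cmd_resp0 & err_mask);
}

/* Any error bit in the data command response, the stop response or the
 * freshly polled card status marks the request bad (1785-1786). */
bool status_error(const struct resp_state *s, uint32_t card_status,
		  uint32_t err_mask, uint32_t stop_mask)
{
	return (s->cmd_resp0 & err_mask) ||
	       (s->stop_resp0 & stop_mask) ||
	       (card_status & stop_mask);
}
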
1817 struct mmc_blk_request *brq = &mqrq->brq; in mmc_blk_mq_rw_recovery() local
1830 brq->data.bytes_xfered = 0; in mmc_blk_mq_rw_recovery()
1857 if (!err && mmc_blk_cmd_started(brq) && mmc_card_sd(card) && in mmc_blk_mq_rw_recovery()
1860 brq->data.bytes_xfered = 0; in mmc_blk_mq_rw_recovery()
1862 brq->data.bytes_xfered = blocks << 9; in mmc_blk_mq_rw_recovery()
1877 if (brq->data.bytes_xfered) in mmc_blk_mq_rw_recovery()
1886 if (brq->sbc.error || brq->cmd.error) in mmc_blk_mq_rw_recovery()
1895 if (rq_data_dir(req) == READ && brq->data.blocks > in mmc_blk_mq_rw_recovery()
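
The mmc_blk_mq_rw_recovery() references (1817-1895) sketch the recovery policy: distrust the transferred byte count when the status looks bad, let SD cards report how many blocks of a failed write actually landed, requeue the remainder if anything was transferred, retry outright on command-phase errors, and fall back to single-sector reads for multi-block reads that keep failing. A condensed decision model; the real function also re-polls card status, releases retuning, resets the host when needed and manages the retry budget:

#include <stdbool.h>

enum rw_recovery_action {
	RECOV_REQUEUE_REMAINDER,	/* partial progress: requeue what's left */
	RECOV_RETRY,			/* retry the whole transfer */
	RECOV_READ_SINGLE,		/* failing multi-block read: go sector by sector */
};

enum rw_recovery_action
classify_recovery(bool status_bad, bool is_sd_write,
		  unsigned int sd_blocks_written, bool cmd_phase_error,
		  bool is_read, bool multi_sector, unsigned int *bytes_xfered)
{
	if (status_bad)
		*bytes_xfered = 0;	/* don't trust the host's byte count */

	/* SD cards can report how many blocks of a failed write stuck. */
	if (is_sd_write)
		*bytes_xfered = sd_blocks_written << 9;

	if (*bytes_xfered)
		return RECOV_REQUEUE_REMAINDER;

	if (cmd_phase_error)
		return RECOV_RETRY;	/* command never ran; cheap to retry */

	if (is_read && multi_sector)
		return RECOV_READ_SINGLE;

	return RECOV_RETRY;
}
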
1903 static inline bool mmc_blk_rq_error(struct mmc_blk_request *brq) in mmc_blk_rq_error() argument
1905 mmc_blk_eval_resp_error(brq); in mmc_blk_rq_error()
1907 return brq->sbc.error || brq->cmd.error || brq->stop.error || in mmc_blk_rq_error()
1908 brq->data.error || brq->cmd.resp[0] & CMD_ERRORS; in mmc_blk_rq_error()
1965 mqrq->brq.data.bytes_xfered = 0; in mmc_blk_card_busy()
1978 if (cb_data.status & mmc_blk_stop_err_bits(&mqrq->brq)) { in mmc_blk_card_busy()
1979 mqrq->brq.data.bytes_xfered = 0; in mmc_blk_card_busy()
1985 mqrq->brq.cmd.resp[0] |= R1_EXCEPTION_EVENT; in mmc_blk_card_busy()
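
The mmc_blk_card_busy() lines (1965-1985) show the post-write busy check: after polling the card, any error bits from mmc_blk_stop_err_bits() invalidate the transferred byte count (so the request is retried rather than silently completed), and an exception-event bit in the polled status is copied into the saved command response so the BKOPS check below can see it. A small model of those two updates; R1_EXCEPTION_EVENT is a stand-in for the kernel define:

#include <errno.h>
#include <stdint.h>

#define R1_EXCEPTION_EVENT	(1u << 6)	/* stand-in */

int check_busy_result(uint32_t polled_status, uint32_t stop_err_mask,
		      unsigned int *bytes_xfered, uint32_t *saved_cmd_resp0)
{
	int err = 0;

	if (polled_status & stop_err_mask) {
		*bytes_xfered = 0;	/* don't trust the transfer */
		err = -EIO;
	}

	/* Latch the exception event so later code still sees it. */
	if (polled_status & R1_EXCEPTION_EVENT)
		*saved_cmd_resp0 |= R1_EXCEPTION_EVENT;

	return err;
}
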
2001 unsigned int nr_bytes = mqrq->brq.data.bytes_xfered; in mmc_blk_mq_complete_rq()
2023 (mqrq->brq.cmd.resp[0] & R1_EXCEPTION_EVENT || in mmc_blk_urgent_bkops_needed()
2024 mqrq->brq.stop.resp[0] & R1_EXCEPTION_EVENT); in mmc_blk_urgent_bkops_needed()
2037 container_of(mrq, struct mmc_queue_req, brq.mrq); in mmc_blk_hsq_req_done()
2044 if (mmc_blk_rq_error(&mqrq->brq) || in mmc_blk_hsq_req_done()
2086 if (mmc_blk_rq_error(&mqrq->brq) || in mmc_blk_mq_poll_completion()
2118 struct mmc_request *mrq = &mqrq->brq.mrq; in mmc_blk_mq_post_req()
2148 if (mmc_blk_rq_error(&mqrq->brq)) { in mmc_blk_mq_recovery()
2193 brq.mrq); in mmc_blk_mq_req_done()
2230 if (mmc_blk_rq_error(&mqrq->brq) || in mmc_blk_mq_req_done()
2294 mqrq->brq.mrq.done = mmc_blk_mq_req_done; in mmc_blk_mq_issue_rw_rq()
2296 mmc_pre_req(host, &mqrq->brq.mrq); in mmc_blk_mq_issue_rw_rq()
2304 err = mmc_start_request(host, &mqrq->brq.mrq); in mmc_blk_mq_issue_rw_rq()
2318 mmc_post_req(host, &mqrq->brq.mrq, err); in mmc_blk_mq_issue_rw_rq()
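
Finally, mmc_blk_mq_issue_rw_rq() (2294-2318) wires mqrq->brq.mrq.done to mmc_blk_mq_req_done() before starting the request, and the completion handler (2193) recovers the enclosing queue request from the bare mmc_request pointer with container_of(). A self-contained model of that callback/container_of pairing, with simplified stand-in structs:

#include <stddef.h>
#include <stdio.h>

struct fake_mrq {
	void (*done)(struct fake_mrq *mrq);
};

struct fake_mqrq {
	int tag;
	struct {
		struct fake_mrq mrq;	/* plays the role of brq.mrq */
	} brq;
};

/* container_of pattern from line 2193: walk from the embedded mrq
 * back to the enclosing queue request. */
#define fake_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void req_done(struct fake_mrq *mrq)
{
	struct fake_mqrq *mqrq =
		fake_container_of(mrq, struct fake_mqrq, brq.mrq);

	printf("completed tag %d\n", mqrq->tag);
}

int main(void)
{
	struct fake_mqrq mqrq = { .tag = 3 };

	/* 2294: install the completion callback before issuing. */
	mqrq.brq.mrq.done = req_done;

	/* A real host controller would call this from its IRQ path. */
	mqrq.brq.mrq.done(&mqrq.brq.mrq);
	return 0;
}
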