Lines Matching refs:rqd — drivers/lightnvm/pblk-core.c
79 static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd) in __pblk_end_io_erase() argument
87 line = pblk_ppa_to_line(pblk, rqd->ppa_addr); in __pblk_end_io_erase()
88 pos = pblk_ppa_to_pos(geo, rqd->ppa_addr); in __pblk_end_io_erase()
93 if (rqd->error) { in __pblk_end_io_erase()
95 &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED); in __pblk_end_io_erase()
98 pblk_mark_bb(pblk, line, rqd->ppa_addr); in __pblk_end_io_erase()
101 &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE); in __pblk_end_io_erase()
106 trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr, in __pblk_end_io_erase()
113 static void pblk_end_io_erase(struct nvm_rq *rqd) in pblk_end_io_erase() argument
115 struct pblk *pblk = rqd->private; in pblk_end_io_erase()
117 __pblk_end_io_erase(pblk, rqd); in pblk_end_io_erase()
118 mempool_free(rqd, &pblk->e_rq_pool); in pblk_end_io_erase()
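
The pair above splits erase completion in two: __pblk_end_io_erase() (line 79) resolves rqd->ppa_addr to a line and chunk position, then branches on rqd->error to record the chunk reset as failed (marking the block bad) or done. pblk_end_io_erase() (line 113) is the asynchronous completion callback; reassembled from the fragments above, it recovers the pblk instance stashed in rqd->private and recycles the request:

    static void pblk_end_io_erase(struct nvm_rq *rqd)
    {
            struct pblk *pblk = rqd->private;       /* set at submission time */

            __pblk_end_io_erase(pblk, rqd);         /* chunk state + tracing */
            mempool_free(rqd, &pblk->e_rq_pool);    /* back to the erase pool */
    }
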
241 int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd) in pblk_alloc_rqd_meta() argument
245 rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, in pblk_alloc_rqd_meta()
246 &rqd->dma_meta_list); in pblk_alloc_rqd_meta()
247 if (!rqd->meta_list) in pblk_alloc_rqd_meta()
250 if (rqd->nr_ppas == 1) in pblk_alloc_rqd_meta()
253 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk); in pblk_alloc_rqd_meta()
254 rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk); in pblk_alloc_rqd_meta()
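
Reassembled from lines 241-254: the allocator takes one DMA buffer and carves it up, OOB metadata first, then the PPA list at offset pblk_dma_meta_size(pblk). Only the -ENOMEM and 0 return values are assumed here (they are not part of the match):

    int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
    {
            struct nvm_tgt_dev *dev = pblk->dev;

            rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                               &rqd->dma_meta_list);
            if (!rqd->meta_list)
                    return -ENOMEM;

            if (rqd->nr_ppas == 1)  /* single sector: rqd->ppa_addr is used */
                    return 0;

            /* vectored I/O: the PPA list lives right after the metadata */
            rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
            rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

            return 0;
    }
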
259 void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd) in pblk_free_rqd_meta() argument
263 if (rqd->meta_list) in pblk_free_rqd_meta()
264 nvm_dev_dma_free(dev->parent, rqd->meta_list, in pblk_free_rqd_meta()
265 rqd->dma_meta_list); in pblk_free_rqd_meta()
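
pblk_free_rqd_meta() (line 259) is the mirror image, handing the buffer back via nvm_dev_dma_free(). A hypothetical caller (pblk_example_rq() is illustrative, not from the source) showing how the pair brackets a request, the same shape pblk_line_smeta_read() takes below:

    static int pblk_example_rq(struct pblk *pblk)
    {
            struct nvm_rq rqd;
            int ret;

            memset(&rqd, 0, sizeof(struct nvm_rq));

            ret = pblk_alloc_rqd_meta(pblk, &rqd);  /* DMA meta + PPA list */
            if (ret)
                    return ret;

            /* ... set opcode/nr_ppas/ppa_list and submit ... */

            pblk_free_rqd_meta(pblk, &rqd);         /* nvm_dev_dma_free() */
            return ret;
    }
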
272 struct nvm_rq *rqd; in pblk_alloc_rqd() local
290 rqd = mempool_alloc(pool, GFP_KERNEL); in pblk_alloc_rqd()
291 memset(rqd, 0, rq_size); in pblk_alloc_rqd()
293 return rqd; in pblk_alloc_rqd()
297 void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) in pblk_free_rqd() argument
303 kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap); in pblk_free_rqd()
319 pblk_free_rqd_meta(pblk, rqd); in pblk_free_rqd()
320 mempool_free(rqd, pool); in pblk_free_rqd()
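
pblk_alloc_rqd()/pblk_free_rqd() (lines 272-320) draw requests from type-specific mempools; on the write path, line 303 also frees the lun_bitmap hung off the per-request pdu. A minimal sketch of the allocate-and-zero step, assuming the erase pool seen at line 118 (the read and write pools follow the same pattern, and rq_size stands in for pblk's per-type element size):

    static struct nvm_rq *pblk_example_alloc_erase_rqd(struct pblk *pblk,
                                                       int rq_size)
    {
            struct nvm_rq *rqd;

            /* mempool-backed: sleeps under pressure rather than failing */
            rqd = mempool_alloc(&pblk->e_rq_pool, GFP_KERNEL);
            memset(rqd, 0, rq_size);    /* pool memory is recycled: clear it */

            return rqd;
    }
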
473 void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd) in pblk_log_write_err() argument
477 pblk_print_failed_rqd(pblk, rqd, rqd->error); in pblk_log_write_err()
481 void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd) in pblk_log_read_err() argument
484 if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) { in pblk_log_read_err()
489 switch (rqd->error) { in pblk_log_read_err()
498 pblk_err(pblk, "unknown read error:%d\n", rqd->error); in pblk_log_read_err()
501 pblk_print_failed_rqd(pblk, rqd, rqd->error); in pblk_log_read_err()
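
pblk_log_write_err() (line 473) only counts the failure and dumps the rqd; the read-side classifier is richer. Reassembled from lines 481-501, with the statistics counters (read_empty and friends) assumed from pblk's stats code:

    void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd)
    {
            /* an empty-page read is not necessarily an error (L2P recovery) */
            if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
                    atomic_long_inc(&pblk->read_empty);
                    return;
            }

            switch (rqd->error) {
            case NVM_RSP_WARN_HIGHECC:
                    atomic_long_inc(&pblk->read_high_ecc);
                    break;
            case NVM_RSP_ERR_FAILECC:
            case NVM_RSP_ERR_FAILCRC:
                    atomic_long_inc(&pblk->read_failed);
                    break;
            default:
                    pblk_err(pblk, "unknown read error:%d\n", rqd->error);
            }

            pblk_print_failed_rqd(pblk, rqd, rqd->error);   /* debug builds */
    }
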
510 int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf) in pblk_submit_io() argument
517 if (pblk_check_io(pblk, rqd)) in pblk_submit_io()
521 return nvm_submit_io(dev, rqd, buf); in pblk_submit_io()
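
Reassembled from lines 510-521. pblk_check_io() sanity-checks the request's PPAs (its checks are compiled in only on debug builds); the inflight counter between the check and the submit is assumed, paired with a decrement on the completion side:

    int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
    {
            struct nvm_tgt_dev *dev = pblk->dev;

            if (pblk_check_io(pblk, rqd))
                    return NVM_IO_ERR;

            atomic_inc(&pblk->inflight_io);     /* assumed; dec on completion */
            return nvm_submit_io(dev, rqd, buf);
    }
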
524 void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd) in pblk_check_chunk_state_update() argument
526 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); in pblk_check_chunk_state_update()
530 for (i = 0; i < rqd->nr_ppas; i++) { in pblk_check_chunk_state_update()
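
Both this walk and pblk_rq_to_line_put() (line 1446) fetch the PPA array through nvm_rq_to_ppa_list() before iterating rqd->nr_ppas. The helper exists because struct nvm_rq stores a single-sector address inline and only vectored requests carry a separate list; this is the accessor from include/linux/lightnvm.h:

    static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
    {
            return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
    }
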
544 int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf) in pblk_submit_io_sync() argument
552 if (pblk_check_io(pblk, rqd)) in pblk_submit_io_sync()
556 ret = nvm_submit_io_sync(dev, rqd, buf); in pblk_submit_io_sync()
559 rqd->opcode == NVM_OP_PWRITE) in pblk_submit_io_sync()
560 pblk_check_chunk_state_update(pblk, rqd); in pblk_submit_io_sync()
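
Reassembled from lines 544-560; the difference from pblk_submit_io() is the post-submit hook, where a completed synchronous write updates chunk-state tracing immediately. Line 559 is only the tail of the condition, so the "submit succeeded" half is assumed here, and inflight accounting is elided:

    int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd, void *buf)
    {
            struct nvm_tgt_dev *dev = pblk->dev;
            int ret;

            if (pblk_check_io(pblk, rqd))
                    return NVM_IO_ERR;

            ret = nvm_submit_io_sync(dev, rqd, buf);

            if (!ret && rqd->opcode == NVM_OP_PWRITE)   /* sync write done */
                    pblk_check_chunk_state_update(pblk, rqd);

            return ret;
    }
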
565 static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd, in pblk_submit_io_sync_sem() argument
568 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); in pblk_submit_io_sync_sem()
572 ret = pblk_submit_io_sync(pblk, rqd, buf); in pblk_submit_io_sync_sem()
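
The _sem variant (lines 565-572) brackets the synchronous submit with the per-chunk write semaphore, keyed on the first PPA. The down/up helpers are assumed from pblk's locking code (pblk_down_chunk()/pblk_up_chunk()):

    static int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd,
                                       void *buf)
    {
            struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
            int ret;

            pblk_down_chunk(pblk, ppa_list[0]);     /* serialize vs. writer */
            ret = pblk_submit_io_sync(pblk, rqd, buf);
            pblk_up_chunk(pblk, ppa_list[0]);

            return ret;
    }
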
681 struct nvm_rq rqd; in pblk_line_smeta_read() local
685 memset(&rqd, 0, sizeof(struct nvm_rq)); in pblk_line_smeta_read()
687 ret = pblk_alloc_rqd_meta(pblk, &rqd); in pblk_line_smeta_read()
691 rqd.opcode = NVM_OP_PREAD; in pblk_line_smeta_read()
692 rqd.nr_ppas = lm->smeta_sec; in pblk_line_smeta_read()
693 rqd.is_seq = 1; in pblk_line_smeta_read()
694 ppa_list = nvm_rq_to_ppa_list(&rqd); in pblk_line_smeta_read()
699 ret = pblk_submit_io_sync(pblk, &rqd, line->smeta); in pblk_line_smeta_read()
707 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) { in pblk_line_smeta_read()
708 pblk_log_read_err(pblk, &rqd); in pblk_line_smeta_read()
713 pblk_free_rqd_meta(pblk, &rqd); in pblk_line_smeta_read()
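
Lines 681-713 contain every stage of a synchronous metadata read. Reassembled below; the PPA-filling loop, the pblk_line_smeta_start() starting address, and the error paths are assumed from the smeta layout, and inflight accounting is elided:

    static int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
    {
            struct pblk_line_meta *lm = &pblk->lm;
            struct ppa_addr *ppa_list;
            struct nvm_rq rqd;
            u64 paddr = pblk_line_smeta_start(pblk, line);
            int i, ret;

            memset(&rqd, 0, sizeof(struct nvm_rq));

            ret = pblk_alloc_rqd_meta(pblk, &rqd);
            if (ret)
                    return ret;

            rqd.opcode = NVM_OP_PREAD;
            rqd.nr_ppas = lm->smeta_sec;
            rqd.is_seq = 1;                 /* smeta sectors are sequential */
            ppa_list = nvm_rq_to_ppa_list(&rqd);

            for (i = 0; i < lm->smeta_sec; i++, paddr++)
                    ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

            ret = pblk_submit_io_sync(pblk, &rqd, line->smeta);
            if (ret)
                    goto clear_rqd;

            /* a high-ECC warning still returns valid data (line 707) */
            if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                    pblk_log_read_err(pblk, &rqd);
                    ret = -EIO;
            }

    clear_rqd:
            pblk_free_rqd_meta(pblk, &rqd);
            return ret;
    }
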
722 struct nvm_rq rqd; in pblk_line_smeta_write() local
727 memset(&rqd, 0, sizeof(struct nvm_rq)); in pblk_line_smeta_write()
729 ret = pblk_alloc_rqd_meta(pblk, &rqd); in pblk_line_smeta_write()
733 rqd.opcode = NVM_OP_PWRITE; in pblk_line_smeta_write()
734 rqd.nr_ppas = lm->smeta_sec; in pblk_line_smeta_write()
735 rqd.is_seq = 1; in pblk_line_smeta_write()
736 ppa_list = nvm_rq_to_ppa_list(&rqd); in pblk_line_smeta_write()
740 rqd.meta_list, i); in pblk_line_smeta_write()
746 ret = pblk_submit_io_sync_sem(pblk, &rqd, line->smeta); in pblk_line_smeta_write()
754 if (rqd.error) { in pblk_line_smeta_write()
755 pblk_log_write_err(pblk, &rqd); in pblk_line_smeta_write()
760 pblk_free_rqd_meta(pblk, &rqd); in pblk_line_smeta_write()
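
The write side (lines 722-760) mirrors the read with two visible differences: each sector's OOB metadata is stamped through pblk_get_meta() (line 740), and submission goes through the semaphore-taking variant (line 746). A sketch of the stamping loop, with addr_to_gen_ppa() and the ADDR_EMPTY marker assumed from the mapping code:

    for (i = 0; i < lm->smeta_sec; i++, paddr++) {
            struct pblk_sec_meta *meta = pblk_get_meta(pblk,
                                                       rqd.meta_list, i);

            ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
            meta->lba = addr_empty;         /* smeta sectors map no LBA */
    }
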
772 struct nvm_rq rqd; in pblk_line_emeta_read() local
791 memset(&rqd, 0, sizeof(struct nvm_rq)); in pblk_line_emeta_read()
796 rqd.meta_list = meta_list; in pblk_line_emeta_read()
797 rqd.ppa_list = ppa_list_buf; in pblk_line_emeta_read()
798 rqd.dma_meta_list = dma_meta_list; in pblk_line_emeta_read()
799 rqd.dma_ppa_list = dma_ppa_list; in pblk_line_emeta_read()
800 rqd.opcode = NVM_OP_PREAD; in pblk_line_emeta_read()
801 rqd.nr_ppas = rq_ppas; in pblk_line_emeta_read()
802 ppa_list = nvm_rq_to_ppa_list(&rqd); in pblk_line_emeta_read()
804 for (i = 0; i < rqd.nr_ppas; ) { in pblk_line_emeta_read()
809 rqd.is_seq = 1; in pblk_line_emeta_read()
831 ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf); in pblk_line_emeta_read()
839 if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) { in pblk_line_emeta_read()
840 pblk_log_read_err(pblk, &rqd); in pblk_line_emeta_read()
851 nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list); in pblk_line_emeta_read()
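
emeta is larger than a single request, so lines 772-851 read it in windows: the DMA buffers are allocated once and re-attached to the on-stack rqd after each memset (lines 791-799), each window of rq_ppas sectors is read synchronously (line 831), and the buffer pointer advances until nothing is left. A skeleton of that iteration, written as a loop for clarity (the per-iteration memset at line 791 suggests the source steps via a retry label), with PPA generation elided; left_ppas, rq_len and rq_ppas are the function's locals:

    while (left_ppas) {
            /* build ppa_list for this window; rqd.is_seq is set when the
             * window starts at the beginning of a chunk (line 809) */

            ret = pblk_submit_io_sync(pblk, &rqd, emeta_buf);
            if (ret)
                    break;

            if (rqd.error && rqd.error != NVM_RSP_WARN_HIGHECC) {
                    pblk_log_read_err(pblk, &rqd);
                    ret = -EIO;
                    break;
            }

            emeta_buf += rq_len;
            left_ppas -= rq_ppas;
    }
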
855 static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd, in pblk_setup_e_rq() argument
858 rqd->opcode = NVM_OP_ERASE; in pblk_setup_e_rq()
859 rqd->ppa_addr = ppa; in pblk_setup_e_rq()
860 rqd->nr_ppas = 1; in pblk_setup_e_rq()
861 rqd->is_seq = 1; in pblk_setup_e_rq()
862 rqd->bio = NULL; in pblk_setup_e_rq()
867 struct nvm_rq rqd = {NULL}; in pblk_blk_erase_sync() local
873 pblk_setup_e_rq(pblk, &rqd, ppa); in pblk_blk_erase_sync()
878 ret = pblk_submit_io_sync(pblk, &rqd, NULL); in pblk_blk_erase_sync()
879 rqd.private = pblk; in pblk_blk_erase_sync()
880 __pblk_end_io_erase(pblk, &rqd); in pblk_blk_erase_sync()
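
Reassembled from lines 867-880: the synchronous erase completes inline, so no end_io callback is installed; rqd.private is set and __pblk_end_io_erase() is called directly:

    int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
    {
            struct nvm_rq rqd = {NULL};
            int ret;

            pblk_setup_e_rq(pblk, &rqd, ppa);       /* NVM_OP_ERASE, one PPA */

            ret = pblk_submit_io_sync(pblk, &rqd, NULL);
            rqd.private = pblk;
            __pblk_end_io_erase(pblk, &rqd);        /* completes inline */

            return ret;
    }
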
1444 void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd) in pblk_rq_to_line_put() argument
1446 struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd); in pblk_rq_to_line_put()
1449 for (i = 0; i < rqd->nr_ppas; i++) in pblk_rq_to_line_put()
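
A sketch of the per-PPA reference drop implied by lines 1444-1449; the loop body is assumed to resolve each PPA to its line and drop a reference through a pblk_ppa_to_line_put()-style helper:

    void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
    {
            struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
            int i;

            for (i = 0; i < rqd->nr_ppas; i++)
                    pblk_ppa_to_line_put(pblk, ppa_list[i]);
    }
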
1691 struct nvm_rq *rqd; in pblk_blk_erase_async() local
1694 rqd = pblk_alloc_rqd(pblk, PBLK_ERASE); in pblk_blk_erase_async()
1696 pblk_setup_e_rq(pblk, rqd, ppa); in pblk_blk_erase_async()
1698 rqd->end_io = pblk_end_io_erase; in pblk_blk_erase_async()
1699 rqd->private = pblk; in pblk_blk_erase_async()
1707 err = pblk_submit_io(pblk, rqd, NULL); in pblk_blk_erase_async()
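
Reassembled from lines 1691-1707, the asynchronous erase ties the earlier pieces together: a mempool-backed rqd, the shared pblk_setup_e_rq() initializer, and the pblk_end_io_erase() callback that returns the rqd to the pool on completion. Error logging after a failed submit is elided:

    int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
    {
            struct nvm_rq *rqd;
            int err;

            rqd = pblk_alloc_rqd(pblk, PBLK_ERASE);     /* cannot fail */

            pblk_setup_e_rq(pblk, rqd, ppa);

            rqd->end_io = pblk_end_io_erase;    /* frees rqd on completion */
            rqd->private = pblk;

            err = pblk_submit_io(pblk, rqd, NULL);
            /* the source logs the failed line on error (elided) */

            return err;
    }
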
2115 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd) in pblk_get_meta_for_writes() argument
2121 buffer = rqd->meta_list; in pblk_get_meta_for_writes()
2127 rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page); in pblk_get_meta_for_writes()
2133 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd) in pblk_get_packed_meta() argument
2135 void *meta_list = rqd->meta_list; in pblk_get_packed_meta()
2142 page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page); in pblk_get_packed_meta()
2144 for (; i < rqd->nr_ppas; i++) in pblk_get_packed_meta()
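
The last two helpers (lines 2115-2144) cover drives without a usable per-sector OOB area: metadata is then "packed" into the last page of the request's bio. pblk_get_meta_for_writes() picks the buffer accordingly; pblk_get_packed_meta() copies packed metadata back into meta_list after a read. A hedged reconstruction of the read side, with pblk_is_oob_meta_supported() and struct pblk_sec_meta assumed from the pblk metadata code:

    void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd)
    {
            void *meta_list = rqd->meta_list;
            void *page;
            int i = 0;

            if (pblk_is_oob_meta_supported(pblk))
                    return;     /* OOB metadata already sits in meta_list */

            /* packed metadata travels in the bio's last page */
            page = page_to_virt(rqd->bio->bi_io_vec[rqd->bio->bi_vcnt - 1].bv_page);

            for (; i < rqd->nr_ppas; i++)
                    memcpy(pblk_get_meta(pblk, meta_list, i),
                           page + (i * sizeof(struct pblk_sec_meta)),
                           sizeof(struct pblk_sec_meta));
    }
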