Home
last modified time | relevance | path

Searched refs:ppa_list (Results 1 – 10 of 10) sorted by relevance

/Linux-v4.19/drivers/lightnvm/
pblk-recovery.c:100 struct ppa_addr *ppa_list; member
113 struct ppa_addr *ppa_list; in pblk_recov_read_oob() local
125 ppa_list = p.ppa_list; in pblk_recov_read_oob()
157 rqd->ppa_list = ppa_list; in pblk_recov_read_oob()
180 rqd->ppa_list[i] = in pblk_recov_read_oob()
207 pblk_update_map(pblk, lba, rqd->ppa_list[i]); in pblk_recov_read_oob()
229 pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); in pblk_end_io_recov()
242 struct ppa_addr *ppa_list; in pblk_recov_pad_oob() local
288 ppa_list = (void *)(meta_list) + pblk_dma_meta_size; in pblk_recov_pad_oob()
308 rqd->ppa_list = ppa_list; in pblk_recov_pad_oob()
[all …]
pblk-map.c:22 struct ppa_addr *ppa_list, in pblk_map_page_data() argument
57 ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); in pblk_map_page_data()
69 w_ctx->ppa = ppa_list[i]; in pblk_map_page_data()
82 pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap); in pblk_map_page_data()
97 if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i], in pblk_map_rq()
122 if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i], in pblk_map_erase_rq()
129 erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]); in pblk_map_erase_rq()
144 *erase_ppa = rqd->ppa_list[i]; in pblk_map_erase_rq()
pblk-read.c:87 rqd->ppa_list[j++] = p; in pblk_read_ppalist_rq()
123 p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr; in pblk_read_check_seq()
156 p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr; in pblk_read_check_rand()
172 struct ppa_addr *ppa_list; in pblk_read_put_rqd_kref() local
175 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; in pblk_read_put_rqd_kref()
178 struct ppa_addr ppa = ppa_list[i]; in pblk_read_put_rqd_kref()
255 rqd->ppa_list = pr_ctx->ppa_ptr; in pblk_end_partial_read()
257 rqd->ppa_list[0] = ppa; in pblk_end_partial_read()
261 lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size); in pblk_end_partial_read()
262 lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size); in pblk_end_partial_read()
[all …]
pblk-write.c:85 pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap); in pblk_complete_write()
211 struct ppa_addr *ppa_list; in pblk_submit_rec() local
216 ppa_list = &rqd->ppa_addr; in pblk_submit_rec()
218 ppa_list = rqd->ppa_list; in pblk_submit_rec()
220 pblk_map_remaining(pblk, ppa_list); in pblk_submit_rec()
223 pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap); in pblk_submit_rec()
278 pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); in pblk_end_io_write_meta()
314 rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size; in pblk_alloc_w_rq()
417 rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); in pblk_submit_meta_io()
427 pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); in pblk_submit_meta_io()
[all …]
core.c:578 struct ppa_addr *ppa_list, int nr_ppas) in nvm_ppa_tgt_to_dev() argument
583 nvm_map_to_dev(tgt_dev, &ppa_list[i]); in nvm_ppa_tgt_to_dev()
584 ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]); in nvm_ppa_tgt_to_dev()
589 struct ppa_addr *ppa_list, int nr_ppas) in nvm_ppa_dev_to_tgt() argument
594 ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]); in nvm_ppa_dev_to_tgt()
595 nvm_map_to_tgt(tgt_dev, &ppa_list[i]); in nvm_ppa_dev_to_tgt()
606 nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas); in nvm_rq_tgt_to_dev()
616 nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas); in nvm_rq_dev_to_tgt()
686 rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list); in nvm_set_rqd_ppalist()
687 if (!rqd->ppa_list) { in nvm_set_rqd_ppalist()
[all …]
pblk-core.c:635 void *ppa_list, *meta_list; in pblk_line_submit_emeta_io() local
661 ppa_list = meta_list + pblk_dma_meta_size; in pblk_line_submit_emeta_io()
682 rqd.ppa_list = ppa_list; in pblk_line_submit_emeta_io()
698 rqd.ppa_list[i] = in pblk_line_submit_emeta_io()
735 rqd.ppa_list[i] = in pblk_line_submit_emeta_io()
811 rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size; in pblk_line_submit_smeta_io()
831 rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id); in pblk_line_submit_smeta_io()
1850 static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, in __pblk_down_page() argument
1864 WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun || in __pblk_down_page()
1865 ppa_list[0].a.ch != ppa_list[i].a.ch); in __pblk_down_page()
[all …]
pblk.h:822 void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
823 void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
825 void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
826 void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
1334 print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error); in pblk_print_failed_rqd()
1378 struct ppa_addr *ppa_list; in pblk_check_io() local
1380 ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; in pblk_check_io()
1382 if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) { in pblk_check_io()
1393 ppa = ppa_list[i]; in pblk_check_io()
/Linux-v4.19/include/uapi/linux/
lightnvm.h:147 __u64 ppa_list; member
166 __u64 ppa_list; member
/Linux-v4.19/drivers/nvme/host/
lightnvm.c:781 __le64 *ppa_list = NULL; in nvme_nvm_submit_user_cmd() local
798 ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma); in nvme_nvm_submit_user_cmd()
799 if (!ppa_list) { in nvme_nvm_submit_user_cmd()
803 if (copy_from_user(ppa_list, (void __user *)ppa_buf, in nvme_nvm_submit_user_cmd()
864 dma_pool_free(dev->dma_pool, ppa_list, ppa_dma); in nvme_nvm_submit_user_cmd()
896 (void __user *)(uintptr_t)vio.ppa_list, vio.nppas, in nvme_nvm_submit_vio()
943 (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas, in nvme_nvm_user_vcmd()
/Linux-v4.19/include/linux/
lightnvm.h:294 struct ppa_addr *ppa_list; member