/Linux-v5.4/drivers/lightnvm/
pblk-map.c
     23  struct ppa_addr *ppa_list,    in pblk_map_page_data() argument
     65  ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);    in pblk_map_page_data()
     78  w_ctx->ppa = ppa_list[i];    in pblk_map_page_data()
     92  pblk_down_rq(pblk, ppa_list[0], lun_bitmap);    in pblk_map_page_data()
    102  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_map_rq() local
    112  ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],    in pblk_map_rq()
    131  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_map_erase_rq() local
    143  ret = pblk_map_page_data(pblk, sentry + i, &ppa_list[i],    in pblk_map_erase_rq()
    148  erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);    in pblk_map_erase_rq()
    163  *erase_ppa = ppa_list[i];    in pblk_map_erase_rq()
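Taken together, these hits trace pblk's write-path mapping: pblk_map_page_data() assigns each sector the next physical address on the open line, records it in the sector's write context, and takes the per-LUN write semaphore on the first PPA. A minimal sketch of that shape follows; the function name, loop scaffolding, nr_secs parameter, and the pblk_rb_w_ctx() lookup are assumptions filled in around the hit lines, not verbatim kernel code.

    /* Sketch of the mapping loop implied by the hits above; only the
     * ppa_list statements mirror pblk-map.c, the rest is reconstructed. */
    static void map_page_sketch(struct pblk *pblk, unsigned int sentry,
                                struct ppa_addr *ppa_list,
                                unsigned long *lun_bitmap,
                                struct pblk_line *line, u64 paddr,
                                unsigned int nr_secs)
    {
            unsigned int i;

            for (i = 0; i < nr_secs; i++, paddr++) {
                    /* line 65: next free sector on the line, device format */
                    ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

                    /* line 78: remember the PPA in the write-buffer context
                     * (the pblk_rb_w_ctx() lookup is an assumption) */
                    pblk_rb_w_ctx(&pblk->rwb, sentry + i)->ppa = ppa_list[i];
            }

            /* line 92: serialize on the LUN that owns the first PPA */
            pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
    }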
|
pblk-read.c
     48  nr_secs = pblk_lookup_l2p_seq(pblk, rqd->ppa_list, blba, rqd->nr_ppas,    in pblk_read_ppalist_rq()
     58  if (pblk_ppa_empty(rqd->ppa_list[i])) {    in pblk_read_ppalist_rq()
     62  } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {    in pblk_read_ppalist_rq()
     69  rqd->ppa_list[i])) {    in pblk_read_ppalist_rq()
    128  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_read_check_seq() local
    130  print_ppa(pblk, &ppa_list[i], "seq", i);    in pblk_read_check_seq()
    164  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_read_check_rand() local
    166  print_ppa(pblk, &ppa_list[j], "rnd", j);    in pblk_read_check_rand()
    337  rqd->ppa_addr = rqd->ppa_list[0];    in pblk_submit_read()
    373  rqd->ppa_list[valid_secs++] = ppa_list_l2p[i];    in read_ppalist_rq_gc()
    [all …]
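The read-path hits show the three-way classification pblk performs after the L2P lookup at line 48: an empty PPA means the LBA was never written, a cache address means the data still lives in the ring write buffer, and anything else is a media address left in place for the device read. A hedged fragment of that branch structure; the handling bodies are placeholders, not the kernel's code.

    /* Fragment mirroring pblk_read_ppalist_rq(); i, nr_secs and rqd are
     * taken from the surrounding (elided) function. */
    for (i = 0; i < nr_secs; i++) {
            if (pblk_ppa_empty(rqd->ppa_list[i])) {
                    /* line 58: LBA never written, report a hole, zero-fill */
            } else if (pblk_addr_in_cache(rqd->ppa_list[i])) {
                    /* line 62: data still in the ring write buffer,
                     * serve it from cache instead of the device */
            } else {
                    /* media address: leave it for the device read */
            }
    }

    /* line 337: a request that collapses to a single sector moves its PPA
     * into the inline field, matching the nvm_rq_to_ppa_list() convention */
    rqd->ppa_addr = rqd->ppa_list[0];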
|
pblk-recovery.c
    143  struct ppa_addr *ppa_list;    member
    160  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_end_io_recov() local
    164  pblk_up_chunk(pblk, ppa_list[0]);    in pblk_end_io_recov()
    181  struct ppa_addr *ppa_list;    in pblk_recov_pad_line() local
    229  ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_recov_pad_line()
    257  ppa_list[i] = dev_ppa;    in pblk_recov_pad_line()
    262  pblk_down_chunk(pblk, ppa_list[0]);    in pblk_recov_pad_line()
    267  pblk_up_chunk(pblk, ppa_list[0]);    in pblk_recov_pad_line()
    360  struct ppa_addr *ppa_list;    in pblk_recov_scan_oob() local
    376  ppa_list = p.ppa_list;    in pblk_recov_scan_oob()
    [all …]
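The recovery hits sketch the pad path: build the PPA list, take the chunk semaphore on the first PPA, and release it either on submission error or from the completion handler at line 164. Roughly, with the submission call and error unwind reconstructed as assumptions around the hit lines:

    /* Shape of pblk_recov_pad_line(); dev_ppa, rqd, pblk and ret come
     * from the surrounding (elided) function. */
    ppa_list = nvm_rq_to_ppa_list(rqd);             /* line 229 */
    for (i = 0; i < rqd->nr_ppas; i++)
            ppa_list[i] = dev_ppa;                  /* line 257: pad sector */

    pblk_down_chunk(pblk, ppa_list[0]);             /* line 262 */

    ret = pblk_submit_io(pblk, rqd);                /* assumption */
    if (ret)
            pblk_up_chunk(pblk, ppa_list[0]);       /* line 267: unwind */
    /* otherwise pblk_end_io_recov() releases it at line 164 */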
|
core.c
    590  struct ppa_addr *ppa_list, int nr_ppas)    in nvm_ppa_tgt_to_dev() argument
    595  nvm_map_to_dev(tgt_dev, &ppa_list[i]);    in nvm_ppa_tgt_to_dev()
    596  ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);    in nvm_ppa_tgt_to_dev()
    601  struct ppa_addr *ppa_list, int nr_ppas)    in nvm_ppa_dev_to_tgt() argument
    606  ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);    in nvm_ppa_dev_to_tgt()
    607  nvm_map_to_tgt(tgt_dev, &ppa_list[i]);    in nvm_ppa_dev_to_tgt()
    613  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in nvm_rq_tgt_to_dev() local
    615  nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);    in nvm_rq_tgt_to_dev()
    620  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in nvm_rq_dev_to_tgt() local
    622  nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);    in nvm_rq_dev_to_tgt()
    [all …]
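These hits pair up into two symmetric translation helpers: one rewrites a PPA list from target address space to device address space before submission, the other undoes it on completion. Restoring the loop scaffolding around the hit lines gives approximately the following; only the indexed statements are verbatim.

    static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
                                   struct ppa_addr *ppa_list, int nr_ppas)
    {
            int i;

            for (i = 0; i < nr_ppas; i++) {
                    /* lines 595-596: target layout to device layout */
                    nvm_map_to_dev(tgt_dev, &ppa_list[i]);
                    ppa_list[i] = generic_to_dev_addr(tgt_dev->parent,
                                                      ppa_list[i]);
            }
    }

    static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
                                   struct ppa_addr *ppa_list, int nr_ppas)
    {
            int i;

            for (i = 0; i < nr_ppas; i++) {
                    /* lines 606-607: the exact inverse, in reverse order */
                    ppa_list[i] = dev_to_generic_addr(tgt_dev->parent,
                                                      ppa_list[i]);
                    nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
            }
    }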
|
pblk-write.c
    215  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_submit_rec() local
    219  pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);    in pblk_submit_rec()
    278  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_end_io_write_meta() local
    281  pblk_up_chunk(pblk, ppa_list[0]);    in pblk_end_io_write_meta()
    374  struct ppa_addr *ppa_list;    in pblk_submit_meta_io() local
    397  ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_submit_meta_io()
    403  ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);    in pblk_submit_meta_io()
    412  pblk_down_chunk(pblk, ppa_list[0]);    in pblk_submit_meta_io()
    423  pblk_up_chunk(pblk, ppa_list[0]);    in pblk_submit_meta_io()
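The metadata write path shows the same chunk-semaphore discipline as recovery: pblk_submit_meta_io() takes the semaphore on ppa_list[0] before issuing the I/O (line 412, with line 423 as the error unwind), and pblk_end_io_write_meta() releases it when the request completes. A sketch of the completion side; the function name and the assumption that rqd->private carries the pblk instance are mine, not verbatim.

    /* Completion side of the meta write, reconstructed from the hits. */
    static void end_io_write_meta_sketch(struct nvm_rq *rqd)
    {
            struct pblk *pblk = rqd->private;   /* assumption */
            struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

            /* line 281: release the chunk taken at submit time (line 412) */
            pblk_up_chunk(pblk, ppa_list[0]);

            /* ... emeta accounting and request teardown elided ... */
    }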
|
pblk-core.c
    253  rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);    in pblk_alloc_rqd_meta()
    526  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_check_chunk_state_update() local
    531  struct ppa_addr *ppa = &ppa_list[i];    in pblk_check_chunk_state_update()
    568  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_submit_io_sync_sem() local
    571  pblk_down_chunk(pblk, ppa_list[0]);    in pblk_submit_io_sync_sem()
    573  pblk_up_chunk(pblk, ppa_list[0]);    in pblk_submit_io_sync_sem()
    680  struct ppa_addr *ppa_list;    in pblk_line_smeta_read() local
    694  ppa_list = nvm_rq_to_ppa_list(&rqd);    in pblk_line_smeta_read()
    697  ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);    in pblk_line_smeta_read()
    721  struct ppa_addr *ppa_list;    in pblk_line_smeta_write() local
    [all …]
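Line 253 is the detail that makes ppa_list cheap in pblk: the PPA list is carved out of the tail of the same DMA-able buffer that holds the OOB metadata, so one allocation serves both, and single-sector requests skip the list entirely in favor of the inline rqd->ppa_addr. A hedged reconstruction of that allocator; line 253 is verbatim, everything else (including the dma_ppa_list mirror) is filled in as an assumption.

    static int alloc_rqd_meta_sketch(struct pblk *pblk, struct nvm_rq *rqd)
    {
            struct nvm_tgt_dev *dev = pblk->dev;

            rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
                                               &rqd->dma_meta_list);
            if (!rqd->meta_list)
                    return -ENOMEM;

            /* single sector: the inline rqd->ppa_addr is enough */
            if (rqd->nr_ppas == 1)
                    return 0;

            /* line 253: the PPA list lives in the tail of the same DMA
             * buffer, right after the OOB metadata */
            rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size(pblk);
            rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size(pblk);

            return 0;
    }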
|
pblk.h
   1224  print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error);    in pblk_print_failed_rqd()
   1268  struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);    in pblk_check_io() local
   1270  if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {    in pblk_check_io()
   1280  line = pblk_ppa_to_line(pblk, ppa_list[i]);    in pblk_check_io()
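These are pblk's debug-build sanity checks: every outgoing request has its PPA list validated against the device geometry, and reads additionally resolve each PPA back to its owning line. A fragment of that checking shape; how a failure is reported (WARN_ON, return code) and the line-state verification are assumptions.

    /* Fragment of the pblk_check_io() shape; dev, rqd, pblk, i and line
     * come from the surrounding (elided) function. */
    struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

    if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
            WARN_ON(1);                    /* assumption */
            return -EINVAL;                /* assumption */
    }

    if (rqd->opcode == NVM_OP_PREAD) {
            for (i = 0; i < rqd->nr_ppas; i++) {
                    line = pblk_ppa_to_line(pblk, ppa_list[i]);  /* line 1280 */
                    /* ... verify the line is still valid to read ... */
            }
    }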
|
/Linux-v5.4/include/uapi/linux/
lightnvm.h
    147  __u64 ppa_list;    member
    166  __u64 ppa_list;    member
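Both members are user-space buffer addresses carried through a fixed-width __u64 so the ioctl ABI stays identical for 32-bit and 64-bit callers; the driver casts them back with (void __user *)(uintptr_t) as the vio/vcmd hits below show. A hedged user-side fragment, assuming the surrounding structs are the vio types consumed by nvme_nvm_submit_vio() and nvme_nvm_user_vcmd(), with n PPAs prepared by the caller:

    /* Userspace side, hypothetical: hand the kernel an array of raw PPAs
     * through the __u64 member. */
    __u64 ppas[64];
    struct nvm_user_vio vio = { 0 };

    /* ... fill ppas[0..n-1] with device-format addresses ... */
    vio.nppas = n - 1;                      /* assumption: 0-based, NVMe style */
    vio.ppa_list = (__u64)(uintptr_t)ppas;  /* pointer through the __u64 */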
|
/Linux-v5.4/drivers/nvme/host/
lightnvm.c
    763  __le64 *ppa_list = NULL;    in nvme_nvm_submit_user_cmd() local
    780  ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);    in nvme_nvm_submit_user_cmd()
    781  if (!ppa_list) {    in nvme_nvm_submit_user_cmd()
    785  if (copy_from_user(ppa_list, (void __user *)ppa_buf,    in nvme_nvm_submit_user_cmd()
    846  dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);    in nvme_nvm_submit_user_cmd()
    878  (void __user *)(uintptr_t)vio.ppa_list, vio.nppas,    in nvme_nvm_submit_vio()
    925  (void __user *)(uintptr_t)vcmd.ppa_list, vcmd.nppas,    in nvme_nvm_user_vcmd()
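The submit path shows the full round trip of a user PPA list: allocate a controller-visible buffer from the device DMA pool, copy the user array in, point the NVMe command at the DMA address, and free the buffer once the command is done. In outline, with error handling simplified and the (ppa_len + 1) copy size taken as an assumption of a 0-based count:

    /* Outline of the PPA handling in nvme_nvm_submit_user_cmd();
     * dev, ppa_buf, ppa_len come from the surrounding (elided) function. */
    __le64 *ppa_list;
    dma_addr_t ppa_dma;
    int ret = 0;

    ppa_list = dma_pool_alloc(dev->dma_pool, GFP_KERNEL, &ppa_dma);  /* line 780 */
    if (!ppa_list)
            return -ENOMEM;

    if (copy_from_user(ppa_list, (void __user *)ppa_buf,
                       sizeof(u64) * (ppa_len + 1))) {               /* line 785 */
            ret = -EFAULT;
    } else {
            /* ... build the NVMe command around ppa_dma and execute ... */
    }

    dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);                 /* line 846 */
    return ret;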
|
/Linux-v5.4/include/linux/
lightnvm.h
    292  struct ppa_addr *ppa_list;    member
    323  return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;    in nvm_rq_to_ppa_list()
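Hit 323 is the idiom every ppa_list user above relies on: one accessor hides whether a request carries a single PPA inline or many through the DMA-able list. Restoring the wrapper around the returned expression gives the helper below; the static inline signature is assumed, the body is verbatim from the hit.

    static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
    {
            /* one sector: the PPA is stored inline in rqd->ppa_addr;
             * more: it lives in the DMA-able list (member at line 292) */
            return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
    }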
|