Lines matching "nand" in the generic NAND framework core

10 #define pr_fmt(fmt)	"nand: " fmt
13 #include <linux/mtd/nand.h>
17 * @nand: NAND device
22 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
24 if (nanddev_bbt_is_initialized(nand)) {
28 entry = nanddev_bbt_pos_to_entry(nand, pos);
29 status = nanddev_bbt_get_block_status(nand, entry);
32 if (nand->ops->isbad(nand, pos))
37 nanddev_bbt_set_block_status(nand, entry, status);
47 return nand->ops->isbad(nand, pos);
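The lazy lookup above (consult the bad block table when it is initialized, fall back to the nand->ops->isbad() hook otherwise) is typically reached from an mtd->_block_isbad() implementation that first converts the byte offset into a struct nand_pos. A minimal sketch, assuming <linux/mtd/nand.h>; the my_mtd_block_isbad() name is illustrative and not part of this file:

static int my_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;

	/* Translate the byte offset into target/LUN/eraseblock coordinates. */
	nanddev_offs_to_pos(nand, offs, &pos);

	/* Consults the in-memory BBT when available, otherwise asks the
	 * low-level nand->ops->isbad() hook. */
	return nanddev_isbad(nand, &pos);
}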
53 * @nand: NAND device
57 * calls the low-level markbad hook (nand->ops->markbad()).
61 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
63 struct mtd_info *mtd = nanddev_to_mtd(nand);
67 if (nanddev_isbad(nand, pos))
70 ret = nand->ops->markbad(nand, pos);
73 nanddev_pos_to_offs(nand, pos), ret);
75 if (!nanddev_bbt_is_initialized(nand))
78 entry = nanddev_bbt_pos_to_entry(nand, pos);
79 ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
83 ret = nanddev_bbt_update(nand);
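Marking a block follows the same offset-to-position pattern: nanddev_markbad() first writes the bad block marker through nand->ops->markbad(), then flags the BBT entry as NAND_BBT_BLOCK_WORN and persists the table via nanddev_bbt_update(). A hedged sketch of an mtd->_block_markbad() wrapper (my_mtd_block_markbad() is a hypothetical name):

static int my_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);

	/* No-op if the block is already bad; otherwise writes the BBM and
	 * records the block as worn in the bad block table. */
	return nanddev_markbad(nand, &pos);
}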
95 * @nand: NAND device
96 * @pos: NAND position to test
102 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
107 if (!nanddev_bbt_is_initialized(nand))
111 entry = nanddev_bbt_pos_to_entry(nand, pos);
112 status = nanddev_bbt_get_block_status(nand, entry);
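Reserved blocks (for instance those holding the bad block table itself) have to be skipped just like bad ones. As a rough illustration of combining the two predicates, the loop below counts the usable eraseblocks of a device; it is a sketch built on the iteration helpers from <linux/mtd/nand.h>, not code from this file:

static unsigned int my_count_usable_blocks(struct nand_device *nand)
{
	unsigned int i, usable = 0;
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, 0, &pos);
	for (i = 0; i < nanddev_neraseblocks(nand); i++) {
		if (!nanddev_isbad(nand, &pos) && !nanddev_isreserved(nand, &pos))
			usable++;
		nanddev_pos_next_eraseblock(nand, &pos);
	}

	return usable;
}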
118 * nanddev_erase() - Erase a NAND portion
119 * @nand: NAND device
126 int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
128 if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
130 nanddev_pos_to_offs(nand, pos));
134 return nand->ops->erase(nand, pos);
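Erasing a single block by offset therefore needs no extra bad-block handling in the caller; the check is centralized here. Sketch with an illustrative helper name:

static int my_erase_block_at(struct nand_device *nand, loff_t offs)
{
	struct nand_pos pos;

	nanddev_offs_to_pos(nand, offs, &pos);

	/* Refuses to touch bad or reserved blocks and otherwise forwards
	 * the request to nand->ops->erase(). */
	return nanddev_erase(nand, &pos);
}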
139 * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
144 * concerned by @einfo and calling nand->ops->erase() on each of them.
147 * because there's no locking here. NAND specialized layers should instead
155 struct nand_device *nand = mtd_to_nanddev(mtd);
159 nanddev_offs_to_pos(nand, einfo->addr, &pos);
160 nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
162 ret = nanddev_erase(nand, &pos);
164 einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
169 nanddev_pos_next_eraseblock(nand, &pos);
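Since nanddev_mtd_erase() takes no lock (see the note above), a specialized layer normally provides its own mtd->_erase() that serializes against other flash accesses before walking the eraseblocks. A minimal sketch, assuming the usual kernel headers (<linux/mutex.h>, <linux/mtd/mtd.h>) and a driver-private structure embedding the nand_device; all my_* names are hypothetical:

struct my_nand {
	struct nand_device base;
	struct mutex lock;
};

static int my_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
{
	struct my_nand *mynand = container_of(mtd_to_nanddev(mtd),
					      struct my_nand, base);
	int ret;

	mutex_lock(&mynand->lock);	/* serialize with reads/writes */
	ret = nanddev_mtd_erase(mtd, einfo);
	mutex_unlock(&mynand->lock);

	return ret;
}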
178 * a specific region of the NAND device
180 * @offs: offset of the NAND region
181 * @len: length of the NAND region
184 * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
191 struct nand_device *nand = mtd_to_nanddev(mtd);
195 if (!nand->memorg.max_bad_eraseblocks_per_lun)
198 nanddev_offs_to_pos(nand, offs, &pos);
199 nanddev_offs_to_pos(nand, offs + len, &end);
201 for (nanddev_offs_to_pos(nand, offs, &pos);
203 nanddev_pos_next_lun(nand, &pos))
204 max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
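The estimate is simply the per-LUN worst case summed over every LUN touched by the range: e.g. if the range spans two LUNs and max_bad_eraseblocks_per_lun is 40, the function returns 80. That makes it a drop-in mtd->_max_bad_blocks implementation; wiring it up is a single assignment (sketch, setup function name assumed):

static void my_setup_mtd_hooks(struct nand_device *nand)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	/* Let mtd_max_bad_blocks() users (UBI, for example) get the
	 * generic per-LUN worst-case figure computed above. */
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
}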
211 * nanddev_init() - Initialize a NAND device
212 * @nand: NAND device
213 * @ops: NAND device operations
214 * @owner: NAND device owner
216 * Initializes a NAND device object. Consistency checks are done on @ops and
217 * @nand->memorg. Also takes care of initializing the BBT.
221 int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
224 struct mtd_info *mtd = nanddev_to_mtd(nand);
225 struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
227 if (!nand || !ops)
239 nand->rowconv.eraseblock_addr_shift =
241 nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
242 nand->rowconv.eraseblock_addr_shift;
244 nand->ops = ops;
253 mtd->size = nanddev_size(nand);
256 return nanddev_bbt_init(nand);
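Putting it together, a driver fills in the memory organization, provides the nand_ops hooks used above (erase, markbad, isbad) and calls nanddev_init() at probe time; the function then derives the row-conversion shifts, populates the mtd fields and initializes the BBT. The geometry values and empty hook bodies below are placeholders for illustration only:

static int my_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;	/* issue the block erase command to the chip here */
}

static int my_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return 0;	/* write a bad block marker to the chip here */
}

static bool my_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	return false;	/* read the factory bad block marker here */
}

static const struct nand_ops my_nand_ops = {
	.erase = my_erase,
	.markbad = my_markbad,
	.isbad = my_isbad,
};

static int my_nand_probe(struct nand_device *nand)
{
	struct nand_memory_organization *memorg = nanddev_get_memorg(nand);

	/* Illustrative SLC layout: 2KiB pages, 64 pages per block,
	 * 1024 blocks in a single LUN/target (128MiB total). */
	memorg->bits_per_cell = 1;
	memorg->pagesize = 2048;
	memorg->oobsize = 64;
	memorg->pages_per_eraseblock = 64;
	memorg->eraseblocks_per_lun = 1024;
	memorg->max_bad_eraseblocks_per_lun = 20;
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;
	memorg->ntargets = 1;

	return nanddev_init(nand, &my_nand_ops, THIS_MODULE);
}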
262 * @nand: NAND device
266 void nanddev_cleanup(struct nand_device *nand)
268 if (nanddev_bbt_is_initialized(nand))
269 nanddev_bbt_cleanup(nand);
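The teardown path mirrors this: once the MTD device has been unregistered, nanddev_cleanup() releases what nanddev_init() set up, i.e. the bad block table. Remove-path sketch (hypothetical function name):

static void my_nand_remove(struct nand_device *nand)
{
	/* Unregister nanddev_to_mtd(nand) before this point. */
	nanddev_cleanup(nand);
}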
273 MODULE_DESCRIPTION("Generic NAND framework");