Lines matching "nand-ecc-mode"
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright 2017 - Free Electrons
6 * Boris Brezillon <boris.brezillon@free-electrons.com>
18 * struct nand_memory_organization - Memory organization structure
19 * @bits_per_cell: number of bits per NAND cell
27 * @ntargets: total number of targets exposed by the NAND device
55 * struct nand_row_converter - Information needed to convert an absolute offset
67 * struct nand_pos - NAND position object
68 * @target: the NAND target/die
74 * This information is usually used by specific sub-layers to select the
86 * enum nand_page_io_req_type - Direction of an I/O request
96 * struct nand_page_io_req - NAND I/O request object
105 * @mode: one of the %MTD_OPS_XXX modes
107 * This object is used to pass per-page I/O requests to NAND sub-layers. This
109 * specific NAND layers can focus on translating this information into
127 int mode;
135 * enum nand_ecc_engine_type - NAND ECC engine type
137 * @NAND_ECC_ENGINE_TYPE_NONE: No ECC correction
138 * @NAND_ECC_ENGINE_TYPE_SOFT: Software ECC correction
139 * @NAND_ECC_ENGINE_TYPE_ON_HOST: On host hardware ECC correction
140 * @NAND_ECC_ENGINE_TYPE_ON_DIE: On chip hardware ECC correction
151 * enum nand_ecc_placement - NAND ECC bytes placement
152 * @NAND_ECC_PLACEMENT_UNKNOWN: The actual position of the ECC bytes is unknown
153 * @NAND_ECC_PLACEMENT_OOB: The ECC bytes are located in the OOB area
154 * @NAND_ECC_PLACEMENT_INTERLEAVED: Syndrome layout, there are ECC bytes
165 * enum nand_ecc_algo - NAND ECC algorithm
168 * @NAND_ECC_ALGO_BCH: Bose-Chaudhuri-Hocquenghem algorithm
169 * @NAND_ECC_ALGO_RS: Reed-Solomon algorithm
179 * struct nand_ecc_props - NAND ECC properties
180 * @engine_type: ECC engine type
182 * @algo: ECC algorithm (if relevant)
183 * @strength: ECC strength
198 /* NAND ECC misc flags */
202 * struct nand_bbt - bad block table object
210 * struct nand_ops - NAND operations
212 * erasing, this has been taken care of by the generic NAND layer
215 * NAND layer. This method should just write the BBM (Bad Block
216 * Marker) so that future calls to struct_nand_ops->isbad() return
223 * NAND layers (SPI NAND, raw NAND, ...).
226 int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
227 int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
228 bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
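
A specialized layer usually provides these hooks through a static ops table that it later passes to nanddev_init(). A minimal sketch, where the my_nand_erase/markbad/isbad helpers are hypothetical bus-specific implementations:

static const struct nand_ops my_nand_ops = {
	.erase = my_nand_erase,		/* issue the block erase command */
	.markbad = my_nand_markbad,	/* write the Bad Block Marker */
	.isbad = my_nand_isbad,		/* read back the Bad Block Marker */
};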
232 * struct nand_ecc_context - Context for the ECC engine
233 * @conf: basic ECC engine parameters
234 * @total: total number of bytes used for storing ECC codes, this is used by
236 * @priv: ECC engine driver private data
245 * struct nand_ecc_engine_ops - ECC engine operations
246 * @init_ctx: given a desired user configuration for the pointed NAND device,
247 * requests the ECC engine driver to setup a configuration with
251 * request to be performed with ECC correction.
253 * request and ensure proper ECC correction.
256 int (*init_ctx)(struct nand_device *nand);
257 void (*cleanup_ctx)(struct nand_device *nand);
258 int (*prepare_io_req)(struct nand_device *nand,
260 int (*finish_io_req)(struct nand_device *nand,
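
An ECC engine driver would typically wire these four hooks into the nand_ecc_engine object it exposes to the core. A rough sketch, assuming hypothetical my_ecc_* callbacks (struct layout details may differ between kernel versions):

static struct nand_ecc_engine_ops my_ecc_engine_ops = {
	.init_ctx = my_ecc_init_ctx,
	.cleanup_ctx = my_ecc_cleanup_ctx,
	.prepare_io_req = my_ecc_prepare_io_req,
	.finish_io_req = my_ecc_finish_io_req,
};

static struct nand_ecc_engine my_ecc_engine = {
	.ops = &my_ecc_engine_ops,
};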
265 * struct nand_ecc_engine - ECC engine abstraction for NAND devices
266 * @ops: ECC engine operations
272 void of_get_nand_ecc_user_config(struct nand_device *nand);
273 int nand_ecc_init_ctx(struct nand_device *nand);
274 void nand_ecc_cleanup_ctx(struct nand_device *nand);
275 int nand_ecc_prepare_io_req(struct nand_device *nand,
277 int nand_ecc_finish_io_req(struct nand_device *nand,
279 bool nand_ecc_is_strong_enough(struct nand_device *nand);
282 * struct nand_ecc - Information relative to the ECC
284 * @requirements: ECC requirements from the NAND chip perspective
285 * @user_conf: User desires in terms of ECC parameters
286 * @ctx: ECC context for the ECC engine, derived from the device @requirements
288 * @ondie_engine: On-die ECC engine reference, if any
289 * @engine: ECC engine actually bound
301 * struct nand_device - NAND device
302 * @mtd: MTD instance attached to the NAND device
304 * @ecc: NAND ECC object attached to the NAND device
307 * @ops: NAND operations attached to the NAND device
309 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
310 * should declare their own NAND object embedding a nand_device struct (that's
312 * struct_nand_device->memorg and struct_nand_device->ecc.requirements should
313 * be filled at device detection time to reflect the NAND device
315 * It will take care of converting NAND information into MTD ones, which means
316 * the specialized NAND layers should never manually tweak
317 * struct_nand_device->mtd except for the ->_read/write() hooks.
322 struct nand_ecc ecc;
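
In practice the embedding described above, combined with mtd_to_nanddev(), lets a specialized layer recover its own object from any MTD callback. A minimal sketch, with my_nand_chip and to_my_nand_chip() being hypothetical names and <linux/mtd/nand.h> assumed included:

struct my_nand_chip {
	struct nand_device base;	/* the generic NAND object */
	/* controller- or bus-specific state goes here */
};

static inline struct my_nand_chip *to_my_nand_chip(struct mtd_info *mtd)
{
	/* mtd_to_nanddev() returns the embedded nand_device */
	return container_of(mtd_to_nanddev(mtd), struct my_nand_chip, base);
}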
329 * struct nand_io_iter - NAND I/O iterator
335 * Can be used by specialized NAND layers to iterate over all pages covered
336 * by an MTD I/O request, which should greatly simplify the boilerplate
337 * code needed to read/write data from/to a NAND device.
347 * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
350 * Return: the NAND device embedding @mtd.
358 * nanddev_to_mtd() - Get the MTD device attached to a NAND device
359 * @nand: NAND device
361 * Return: the MTD device embedded in @nand.
363 static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
365 return &nand->mtd;
369 * nanddev_bits_per_cell() - Get the number of bits per cell
370 * @nand: NAND device
374 static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
376 return nand->memorg.bits_per_cell;
380 * nanddev_page_size() - Get NAND page size
381 * @nand: NAND device
385 static inline size_t nanddev_page_size(const struct nand_device *nand)
387 return nand->memorg.pagesize;
391 * nanddev_per_page_oobsize() - Get NAND OOB size
392 * @nand: NAND device
397 nanddev_per_page_oobsize(const struct nand_device *nand)
399 return nand->memorg.oobsize;
403 * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
404 * @nand: NAND device
409 nanddev_pages_per_eraseblock(const struct nand_device *nand)
411 return nand->memorg.pages_per_eraseblock;
415 * nanddev_pages_per_target() - Get the number of pages per target
416 * @nand: NAND device
421 nanddev_pages_per_target(const struct nand_device *nand)
423 return nand->memorg.pages_per_eraseblock *
424 nand->memorg.eraseblocks_per_lun *
425 nand->memorg.luns_per_target;
429 * nanddev_eraseblock_size() - Get NAND erase block size
430 * @nand: NAND device
434 static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
436 return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
440 * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
441 * @nand: NAND device
446 nanddev_eraseblocks_per_lun(const struct nand_device *nand)
448 return nand->memorg.eraseblocks_per_lun;
452 * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
453 * @nand: NAND device
458 nanddev_eraseblocks_per_target(const struct nand_device *nand)
460 return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
464 * nanddev_target_size() - Get the total size provided by a single target/die
465 * @nand: NAND device
469 static inline u64 nanddev_target_size(const struct nand_device *nand)
471 return (u64)nand->memorg.luns_per_target *
472 nand->memorg.eraseblocks_per_lun *
473 nand->memorg.pages_per_eraseblock *
474 nand->memorg.pagesize;
478 * nanddev_ntargets() - Get the total number of targets
479 * @nand: NAND device
481 * Return: the number of targets/dies exposed by @nand.
483 static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
485 return nand->memorg.ntargets;
489 * nanddev_neraseblocks() - Get the total number of eraseblocks
490 * @nand: NAND device
492 * Return: the total number of eraseblocks exposed by @nand.
494 static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
496 return nand->memorg.ntargets * nand->memorg.luns_per_target *
497 nand->memorg.eraseblocks_per_lun;
501 * nanddev_size() - Get NAND size
502 * @nand: NAND device
504 * Return: the total size (in bytes) exposed by @nand.
506 static inline u64 nanddev_size(const struct nand_device *nand)
508 return nanddev_target_size(nand) * nanddev_ntargets(nand);
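
As a worked example with hypothetical numbers: a device with 2 targets, 2 LUNs per target, 1024 eraseblocks per LUN, 64 pages per eraseblock and 4 KiB pages would report 2 * 2 * 1024 * 64 * 4096 = 1 GiB here.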
512 * nanddev_get_memorg() - Extract memory organization info from a NAND device
513 * @nand: NAND device
518 * Return: the memorg object embedded in the NAND device.
521 nanddev_get_memorg(struct nand_device *nand)
523 return &nand->memorg;
527 * nanddev_get_ecc_conf() - Extract the ECC configuration from a NAND device
528 * @nand: NAND device
531 nanddev_get_ecc_conf(struct nand_device *nand)
533 return &nand->ecc.ctx.conf;
537 * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
539 * @nand: NAND device
542 nanddev_get_ecc_requirements(struct nand_device *nand)
544 return &nand->ecc.requirements;
548 * nanddev_set_ecc_requirements() - Assign the ECC requirements of a NAND
550 * @nand: NAND device
554 nanddev_set_ecc_requirements(struct nand_device *nand,
557 nand->ecc.requirements = *reqs;
560 int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
562 void nanddev_cleanup(struct nand_device *nand);
565 * nanddev_register() - Register a NAND device
566 * @nand: NAND device
568 * Register a NAND device.
570 * registering the MTD device embedded in @nand.
574 static inline int nanddev_register(struct nand_device *nand)
576 return mtd_device_register(&nand->mtd, NULL, 0);
580 * nanddev_unregister() - Unregister a NAND device
581 * @nand: NAND device
583 * Unregister a NAND device.
585 * unregistering the MTD device embedded in @nand.
589 static inline int nanddev_unregister(struct nand_device *nand)
591 return mtd_device_unregister(&nand->mtd);
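
Taken together, a specialized layer's probe path usually fills memorg and ecc.requirements first, then calls nanddev_init() followed by nanddev_register(). A rough sketch, reusing the hypothetical my_nand_chip and my_nand_ops names introduced above:

static int my_nand_probe(struct my_nand_chip *chip)
{
	struct nand_device *nand = &chip->base;
	int ret;

	/* memorg and ecc.requirements are assumed to be filled by now */
	ret = nanddev_init(nand, &my_nand_ops, THIS_MODULE);
	if (ret)
		return ret;

	ret = nanddev_register(nand);
	if (ret)
		nanddev_cleanup(nand);

	return ret;
}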
595 * nanddev_set_of_node() - Attach a DT node to a NAND device
596 * @nand: NAND device
599 * Attach a DT node to a NAND device.
601 static inline void nanddev_set_of_node(struct nand_device *nand,
604 mtd_set_of_node(&nand->mtd, np);
608 * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
609 * @nand: NAND device
611 * Return: the DT node attached to @nand.
613 static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
615 return mtd_get_of_node(&nand->mtd);
619 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
620 * @nand: NAND device
621 * @offs: absolute NAND offset (usually passed by the MTD layer)
622 * @pos: a NAND position object to fill in
626 * Return: the offset within the NAND page pointed by @pos.
628 static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
635 pageoffs = do_div(tmp, nand->memorg.pagesize);
636 pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
637 pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
638 pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
639 pos->lun = do_div(tmp, nand->memorg.luns_per_target);
640 pos->target = tmp;
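
As a worked example with a hypothetical geometry of 4 KiB pages, 64 pages per eraseblock, 1024 eraseblocks per LUN and 2 LUNs per target (512 MiB targets): an offset of 0x20031234 decomposes into target 1, LUN 0, eraseblock 0, page 49, and the function returns the in-page offset 0x234.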
646 * nanddev_pos_cmp() - Compare two NAND positions
647 * @a: First NAND position
648 * @b: Second NAND position
650 * Compares two NAND positions.
652 * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
657 if (a->target != b->target)
658 return a->target < b->target ? -1 : 1;
660 if (a->lun != b->lun)
661 return a->lun < b->lun ? -1 : 1;
663 if (a->eraseblock != b->eraseblock)
664 return a->eraseblock < b->eraseblock ? -1 : 1;
666 if (a->page != b->page)
667 return a->page < b->page ? -1 : 1;
673 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
674 * @nand: NAND device
675 * @pos: the NAND position to convert
677 * Converts @pos NAND position into an absolute offset.
683 static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
688 npages = pos->page +
689 ((pos->eraseblock +
690 (pos->lun +
691 (pos->target * nand->memorg.luns_per_target)) *
692 nand->memorg.eraseblocks_per_lun) *
693 nand->memorg.pages_per_eraseblock);
695 return (loff_t)npages * nand->memorg.pagesize;
699 * nanddev_pos_to_row() - Extract a row address from a NAND position
700 * @nand: NAND device
703 * Converts a NAND position into a row address that can then be passed to the
708 static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
711 return (pos->lun << nand->rowconv.lun_addr_shift) |
712 (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
713 pos->page;
717 * nanddev_pos_next_target() - Move a position to the next target/die
718 * @nand: NAND device
722 * want to iterate over all targets/dies of a NAND device.
724 static inline void nanddev_pos_next_target(struct nand_device *nand,
727 pos->page = 0;
728 pos->plane = 0;
729 pos->eraseblock = 0;
730 pos->lun = 0;
731 pos->target++;
735 * nanddev_pos_next_lun() - Move a position to the next LUN
736 * @nand: NAND device
740 * iterate over all LUNs of a NAND device.
742 static inline void nanddev_pos_next_lun(struct nand_device *nand,
745 if (pos->lun >= nand->memorg.luns_per_target - 1)
746 return nanddev_pos_next_target(nand, pos);
748 pos->lun++;
749 pos->page = 0;
750 pos->plane = 0;
751 pos->eraseblock = 0;
755 * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
756 * @nand: NAND device
760 * want to iterate over all eraseblocks of a NAND device.
762 static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
765 if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
766 return nanddev_pos_next_lun(nand, pos);
768 pos->eraseblock++;
769 pos->page = 0;
770 pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
774 * nanddev_pos_next_page() - Move a position to the next page
775 * @nand: NAND device
779 * iterate over all pages of a NAND device.
781 static inline void nanddev_pos_next_page(struct nand_device *nand,
784 if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
785 return nanddev_pos_next_eraseblock(nand, pos);
787 pos->page++;
791 * nanddev_io_iter_init() - Initialize a NAND I/O iterator
792 * @nand: NAND device
795 * @iter: NAND I/O iterator
797 * Initializes a NAND iterator based on the information passed by the MTD
800 static inline void nanddev_io_iter_init(struct nand_device *nand,
805 struct mtd_info *mtd = nanddev_to_mtd(nand);
807 iter->req.type = reqtype;
808 iter->req.mode = req->mode;
809 iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
810 iter->req.ooboffs = req->ooboffs;
811 iter->oobbytes_per_page = mtd_oobavail(mtd, req);
812 iter->dataleft = req->len;
813 iter->oobleft = req->ooblen;
814 iter->req.databuf.in = req->datbuf;
815 iter->req.datalen = min_t(unsigned int,
816 nand->memorg.pagesize - iter->req.dataoffs,
817 iter->dataleft);
818 iter->req.oobbuf.in = req->oobbuf;
819 iter->req.ooblen = min_t(unsigned int,
820 iter->oobbytes_per_page - iter->req.ooboffs,
821 iter->oobleft);
825 * nanddev_io_iter_next_page() - Move to the next page
826 * @nand: NAND device
827 * @iter: NAND I/O iterator
831 static inline void nanddev_io_iter_next_page(struct nand_device *nand,
834 nanddev_pos_next_page(nand, &iter->req.pos);
835 iter->dataleft -= iter->req.datalen;
836 iter->req.databuf.in += iter->req.datalen;
837 iter->oobleft -= iter->req.ooblen;
838 iter->req.oobbuf.in += iter->req.ooblen;
839 iter->req.dataoffs = 0;
840 iter->req.ooboffs = 0;
841 iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
842 iter->dataleft);
843 iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
844 iter->oobleft);
848 * nanddev_io_iter_end() - Check whether iteration should end
849 * @nand: NAND device
850 * @iter: NAND I/O iterator
852 * Check whether @iter has reached the end of the NAND portion it was asked to
858 static inline bool nanddev_io_iter_end(struct nand_device *nand,
861 if (iter->dataleft || iter->oobleft)
868 * nanddev_io_for_each_page() - Iterate over all NAND pages contained in an MTD I/O
870 * @nand: NAND device
873 * @iter: NAND I/O iterator
877 #define nanddev_io_for_each_page(nand, type, start, req, iter) \
878 for (nanddev_io_iter_init(nand, type, start, req, iter); \
879 !nanddev_io_iter_end(nand, iter); \
880 nanddev_io_iter_next_page(nand, iter))
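
A typical consumer of this macro is the MTD read/write path of a specialized layer. A minimal read-side sketch, where my_nand_read_page() is a hypothetical per-page helper and <linux/mtd/mtd.h> plus <linux/mtd/nand.h> are assumed included:

static int my_nand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	int ret = 0;

	/* Walk every page covered by the MTD request */
	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
		ret = my_nand_read_page(nand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	return ret;
}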
882 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
883 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
884 int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
885 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
897 int nanddev_bbt_init(struct nand_device *nand);
898 void nanddev_bbt_cleanup(struct nand_device *nand);
899 int nanddev_bbt_update(struct nand_device *nand);
900 int nanddev_bbt_get_block_status(const struct nand_device *nand,
902 int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
904 int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
907 * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
908 * @nand: NAND device
909 * @pos: the NAND position we want to get BBT entry for
916 static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
919 return pos->eraseblock +
920 ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
921 nand->memorg.eraseblocks_per_lun);
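
For instance, with 2 LUNs per target and 1024 eraseblocks per LUN (hypothetical numbers), a position of target 1, LUN 0, eraseblock 5 maps to BBT entry 5 + ((0 + 1 * 2) * 1024) = 2053.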
925 * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
926 * @nand: NAND device
930 static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
932 return !!nand->bbt.cache;
935 /* MTD -> NAND helper functions. */