Lines Matching +full:use +full:- +full:minimum +full:- +full:ecc
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright © 2010-2015 Broadcom Corporation
16 #include <linux/dma-mapping.h>
87 #define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop on uncorrectable ECC error */
233 /* List of NAND hosts (one for each chip-select) */
236 /* EDU info, per-transaction */
257 /* in-memory cache of the FLASH_CACHE, used only for some commands */
263 const u8 *cs_offsets; /* within each chip-select */
273 /* for low-power standby/resume only */
293 /* use for low-power standby/resume only */
324 BRCMNAND_CS1_BASE, /* CS1 regs, if non-contiguous */
338 BRCMNAND_OOB_READ_10_BASE, /* offset 0x10, if non-contiguous */
340 BRCMNAND_OOB_WRITE_10_BASE, /* offset 0x10, if non-contiguous */
344 /* BRCMNAND v2.1-v2.2 */
374 /* BRCMNAND v3.3-v4.0 */
434 /* BRCMNAND v6.0 - v7.1 */
532 /* Per chip-select offsets for v7.1 */
541 /* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
550 /* Per chip-select offset for <= v5.0 on CS0 only */
560 * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
575 /* Only for pre-v7.1 (with no CFG_EXT register) */
597 return brcmnand_readl(ctrl->nand_base + offs); in nand_readreg()
603 brcmnand_writel(val, ctrl->nand_base + offs); in nand_writereg()
616 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; in brcmnand_revision_init()
619 if (ctrl->nand_version < 0x0201) { in brcmnand_revision_init()
620 dev_err(ctrl->dev, "version %#x not supported\n", in brcmnand_revision_init()
621 ctrl->nand_version); in brcmnand_revision_init()
622 return -ENODEV; in brcmnand_revision_init()
626 if (ctrl->nand_version >= 0x0702) in brcmnand_revision_init()
627 ctrl->reg_offsets = brcmnand_regs_v72; in brcmnand_revision_init()
628 else if (ctrl->nand_version == 0x0701) in brcmnand_revision_init()
629 ctrl->reg_offsets = brcmnand_regs_v71; in brcmnand_revision_init()
630 else if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
631 ctrl->reg_offsets = brcmnand_regs_v60; in brcmnand_revision_init()
632 else if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
633 ctrl->reg_offsets = brcmnand_regs_v50; in brcmnand_revision_init()
634 else if (ctrl->nand_version >= 0x0303) in brcmnand_revision_init()
635 ctrl->reg_offsets = brcmnand_regs_v33; in brcmnand_revision_init()
636 else if (ctrl->nand_version >= 0x0201) in brcmnand_revision_init()
637 ctrl->reg_offsets = brcmnand_regs_v21; in brcmnand_revision_init()
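
For orientation: the revision word read at the top of brcmnand_revision_init() packs the major revision in bits 15:8 and the minor in bits 7:0, so 0x0701 is controller v7.1 and 0x0602 is v6.2. A minimal standalone sketch of the decode (the helper name is illustrative, not part of the driver):

#include <stdio.h>
#include <stdint.h>

/* Decode the 16-bit brcmnand revision word: 0x0701 -> v7.1 (sketch). */
static void ex_show_version(uint16_t nand_version)
{
        printf("controller v%u.%u (raw 0x%04x)\n",
               (unsigned int)(nand_version >> 8),
               (unsigned int)(nand_version & 0xff),
               (unsigned int)nand_version);
}

int main(void)
{
        ex_show_version(0x0701);        /* controller v7.1 (raw 0x0701) */
        return 0;
}
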
639 /* Chip-select stride */ in brcmnand_revision_init()
640 if (ctrl->nand_version >= 0x0701) in brcmnand_revision_init()
641 ctrl->reg_spacing = 0x14; in brcmnand_revision_init()
643 ctrl->reg_spacing = 0x10; in brcmnand_revision_init()
645 /* Per chip-select registers */ in brcmnand_revision_init()
646 if (ctrl->nand_version >= 0x0701) { in brcmnand_revision_init()
647 ctrl->cs_offsets = brcmnand_cs_offsets_v71; in brcmnand_revision_init()
649 ctrl->cs_offsets = brcmnand_cs_offsets; in brcmnand_revision_init()
651 /* v3.3-5.0 have a different CS0 offset layout */ in brcmnand_revision_init()
652 if (ctrl->nand_version >= 0x0303 && in brcmnand_revision_init()
653 ctrl->nand_version <= 0x0500) in brcmnand_revision_init()
654 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; in brcmnand_revision_init()
658 if (ctrl->nand_version >= 0x0701) { in brcmnand_revision_init()
659 /* >= v7.1 use nice power-of-2 values! */ in brcmnand_revision_init()
660 ctrl->max_page_size = 16 * 1024; in brcmnand_revision_init()
661 ctrl->max_block_size = 2 * 1024 * 1024; in brcmnand_revision_init()
663 if (ctrl->nand_version >= 0x0304) in brcmnand_revision_init()
664 ctrl->page_sizes = page_sizes_v3_4; in brcmnand_revision_init()
665 else if (ctrl->nand_version >= 0x0202) in brcmnand_revision_init()
666 ctrl->page_sizes = page_sizes_v2_2; in brcmnand_revision_init()
668 ctrl->page_sizes = page_sizes_v2_1; in brcmnand_revision_init()
670 if (ctrl->nand_version >= 0x0202) in brcmnand_revision_init()
671 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT; in brcmnand_revision_init()
673 ctrl->page_size_shift = CFG_PAGE_SIZE_SHIFT_v2_1; in brcmnand_revision_init()
675 if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
676 ctrl->block_sizes = block_sizes_v6; in brcmnand_revision_init()
677 else if (ctrl->nand_version >= 0x0400) in brcmnand_revision_init()
678 ctrl->block_sizes = block_sizes_v4; in brcmnand_revision_init()
679 else if (ctrl->nand_version >= 0x0202) in brcmnand_revision_init()
680 ctrl->block_sizes = block_sizes_v2_2; in brcmnand_revision_init()
682 ctrl->block_sizes = block_sizes_v2_1; in brcmnand_revision_init()
684 if (ctrl->nand_version < 0x0400) { in brcmnand_revision_init()
685 if (ctrl->nand_version < 0x0202) in brcmnand_revision_init()
686 ctrl->max_page_size = 2048; in brcmnand_revision_init()
688 ctrl->max_page_size = 4096; in brcmnand_revision_init()
689 ctrl->max_block_size = 512 * 1024; in brcmnand_revision_init()
694 if (ctrl->nand_version == 0x0702) in brcmnand_revision_init()
695 ctrl->max_oob = 128; in brcmnand_revision_init()
696 else if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
697 ctrl->max_oob = 64; in brcmnand_revision_init()
698 else if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
699 ctrl->max_oob = 32; in brcmnand_revision_init()
701 ctrl->max_oob = 16; in brcmnand_revision_init()
704 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601) in brcmnand_revision_init()
705 ctrl->features |= BRCMNAND_HAS_PREFETCH; in brcmnand_revision_init()
711 if (ctrl->nand_version >= 0x0700) in brcmnand_revision_init()
712 ctrl->features |= BRCMNAND_HAS_CACHE_MODE; in brcmnand_revision_init()
714 if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
715 ctrl->features |= BRCMNAND_HAS_1K_SECTORS; in brcmnand_revision_init()
717 if (ctrl->nand_version >= 0x0700) in brcmnand_revision_init()
718 ctrl->features |= BRCMNAND_HAS_WP; in brcmnand_revision_init()
719 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) in brcmnand_revision_init()
720 ctrl->features |= BRCMNAND_HAS_WP; in brcmnand_revision_init()
728 if (ctrl->nand_version >= 0x0703) in brcmnand_flash_dma_revision_init()
729 ctrl->flash_dma_offsets = flash_dma_regs_v4; in brcmnand_flash_dma_revision_init()
730 else if (ctrl->nand_version == 0x0602) in brcmnand_flash_dma_revision_init()
731 ctrl->flash_dma_offsets = flash_dma_regs_v0; in brcmnand_flash_dma_revision_init()
733 ctrl->flash_dma_offsets = flash_dma_regs_v1; in brcmnand_flash_dma_revision_init()
739 u16 offs = ctrl->reg_offsets[reg]; in brcmnand_read_reg()
750 u16 offs = ctrl->reg_offsets[reg]; in brcmnand_write_reg()
769 return __raw_readl(ctrl->nand_fc + word * 4); in brcmnand_read_fc()
775 __raw_writel(val, ctrl->nand_fc + word * 4); in brcmnand_write_fc()
781 u16 offs = ctrl->edu_offsets[reg]; in edu_writel()
783 brcmnand_writel(val, ctrl->edu_base + offs); in edu_writel()
789 u16 offs = ctrl->edu_offsets[reg]; in edu_readl()
791 return brcmnand_readl(ctrl->edu_base + offs); in edu_readl()
832 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_cmd_addr()
835 (host->cs << 16) | ((addr >> 32) & 0xffff)); in brcmnand_set_cmd_addr()
845 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE]; in brcmnand_cs_offset()
846 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE]; in brcmnand_cs_offset()
849 if (cs == 0 && ctrl->cs0_offsets) in brcmnand_cs_offset()
850 cs_offs = ctrl->cs0_offsets[reg]; in brcmnand_cs_offset()
852 cs_offs = ctrl->cs_offsets[reg]; in brcmnand_cs_offset()
855 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs; in brcmnand_cs_offset()
857 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs; in brcmnand_cs_offset()
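
A worked instance of the arithmetic above, with hypothetical base values (the real ones come from ctrl->reg_offsets and vary by revision): on a v7.1+ controller the chip-select stride is 0x14, so the register bank for CS2 starts 2 * 0x14 = 0x28 past the CS0 base.

#include <stdint.h>

#define EX_CS0_BASE     0x50    /* hypothetical CS0 bank base */
#define EX_SPACING      0x14    /* v7.1+ chip-select stride */

/* Contiguous-CS case from brcmnand_cs_offset() above. */
static uint16_t ex_cs_reg(unsigned int cs, uint16_t reg_in_bank)
{
        return EX_CS0_BASE + cs * EX_SPACING + reg_in_bank;
}
/* ex_cs_reg(2, 0) == 0x78: CS2's bank begins 0x28 past CS0's. */
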
862 if (ctrl->nand_version < 0x0600) in brcmnand_count_corrected()
869 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_wr_corr_thresh()
872 int cs = host->cs; in brcmnand_wr_corr_thresh()
874 if (!ctrl->reg_offsets[reg]) in brcmnand_wr_corr_thresh()
877 if (ctrl->nand_version == 0x0702) in brcmnand_wr_corr_thresh()
879 else if (ctrl->nand_version >= 0x0600) in brcmnand_wr_corr_thresh()
881 else if (ctrl->nand_version >= 0x0500) in brcmnand_wr_corr_thresh()
886 if (ctrl->nand_version >= 0x0702) { in brcmnand_wr_corr_thresh()
890 } else if (ctrl->nand_version >= 0x0600) { in brcmnand_wr_corr_thresh()
895 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val); in brcmnand_wr_corr_thresh()
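
The read-modify-write above touches only one chip-select's threshold field inside a shared register; the elided lines pick the field width and whether a given CS lands in the base or the _EXT register. A generic sketch of that style of field update:

#include <stdint.h>

/* Place a 'bits'-wide value at field index 'idx' of a packed register,
 * leaving the other fields untouched (sketch of the rmw call above). */
static uint32_t ex_pack_field(uint32_t reg, unsigned int idx,
                              unsigned int bits, uint32_t val)
{
        unsigned int shift = idx * bits;
        uint32_t mask = ((1u << bits) - 1) << shift;

        return (reg & ~mask) | ((val << shift) & mask);
}
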
900 if (ctrl->nand_version < 0x0602) in brcmnand_cmd_shift()
931 if (ctrl->nand_version == 0x0702) in brcmnand_spare_area_mask()
933 else if (ctrl->nand_version >= 0x0600) in brcmnand_spare_area_mask()
935 else if (ctrl->nand_version >= 0x0303) in brcmnand_spare_area_mask()
946 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; in brcmnand_ecc_level_mask()
950 /* v7.2 includes additional ECC levels */ in brcmnand_ecc_level_mask()
951 if (ctrl->nand_version >= 0x0702) in brcmnand_ecc_level_mask()
959 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_ecc_enabled()
960 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); in brcmnand_set_ecc_enabled()
965 acc_control |= ecc_flags; /* enable RD/WR ECC */ in brcmnand_set_ecc_enabled()
966 acc_control |= host->hwcfg.ecc_level in brcmnand_set_ecc_enabled()
969 acc_control &= ~ecc_flags; /* disable RD/WR ECC */ in brcmnand_set_ecc_enabled()
978 if (ctrl->nand_version >= 0x0702) in brcmnand_sector_1k_shift()
980 else if (ctrl->nand_version >= 0x0600) in brcmnand_sector_1k_shift()
982 else if (ctrl->nand_version >= 0x0500) in brcmnand_sector_1k_shift()
985 return -1; in brcmnand_sector_1k_shift()
990 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_get_sector_size_1k()
992 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_get_sector_size_1k()
1003 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_sector_size_1k()
1005 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_sector_size_1k()
1046 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", in bcmnand_ctrl_poll_status()
1049 return -ETIMEDOUT; in bcmnand_ctrl_poll_status()
1065 return ctrl->flash_dma_base; in has_flash_dma()
1070 return ctrl->edu_base; in has_edu()
1080 if (ctrl->pio_poll_mode) in disable_ctrl_irqs()
1084 ctrl->flash_dma_base = NULL; in disable_ctrl_irqs()
1085 disable_irq(ctrl->dma_irq); in disable_ctrl_irqs()
1088 disable_irq(ctrl->irq); in disable_ctrl_irqs()
1089 ctrl->pio_poll_mode = true; in disable_ctrl_irqs()
1101 u16 offs = ctrl->flash_dma_offsets[dma_reg]; in flash_dma_writel()
1103 brcmnand_writel(val, ctrl->flash_dma_base + offs); in flash_dma_writel()
1109 u16 offs = ctrl->flash_dma_offsets[dma_reg]; in flash_dma_readl()
1111 return brcmnand_readl(ctrl->flash_dma_base + offs); in flash_dma_readl()
1114 /* Low-level operation types: command, address, write, or read */
1129 if (ctrl->nand_version <= 0x0701) in is_hamming_ecc()
1130 return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 && in is_hamming_ecc()
1131 cfg->ecc_level == 15; in is_hamming_ecc()
1133 return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 && in is_hamming_ecc()
1134 cfg->ecc_level == 15) || in is_hamming_ecc()
1135 (cfg->spare_area_size == 28 && cfg->ecc_level == 16)); in is_hamming_ecc()
1139 * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
1141 * Returns -ERRCODE on failure.
1148 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_hamming_ooblayout_ecc()
1149 int sas = cfg->spare_area_size << cfg->sector_size_1k; in brcmnand_hamming_ooblayout_ecc()
1150 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); in brcmnand_hamming_ooblayout_ecc()
1153 return -ERANGE; in brcmnand_hamming_ooblayout_ecc()
1155 oobregion->offset = (section * sas) + 6; in brcmnand_hamming_ooblayout_ecc()
1156 oobregion->length = 3; in brcmnand_hamming_ooblayout_ecc()
1166 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_hamming_ooblayout_free()
1167 int sas = cfg->spare_area_size << cfg->sector_size_1k; in brcmnand_hamming_ooblayout_free()
1168 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); in brcmnand_hamming_ooblayout_free()
1172 return -ERANGE; in brcmnand_hamming_ooblayout_free()
1179 oobregion->offset = ((section - 1) * sas) + 9; in brcmnand_hamming_ooblayout_free()
1181 if (cfg->page_size > 512) { in brcmnand_hamming_ooblayout_free()
1183 oobregion->offset = 2; in brcmnand_hamming_ooblayout_free()
1185 /* Small page NAND uses last byte before ECC for BBI */ in brcmnand_hamming_ooblayout_free()
1186 oobregion->offset = 0; in brcmnand_hamming_ooblayout_free()
1187 next--; in brcmnand_hamming_ooblayout_free()
1191 oobregion->length = next - oobregion->offset; in brcmnand_hamming_ooblayout_free()
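
A worked instance of the Hamming layout above, assuming a 2KiB page with 16 spare bytes per 512-byte sector (sas = 16, sectors = 4): each sector's three ECC bytes sit at offsets 6..8 of its 16-byte OOB chunk, i.e. at OOB bytes 6, 22, 38 and 54.

#include <stdio.h>

int main(void)
{
        int sas = 16, sectors = 4, section;

        for (section = 0; section < sectors; section++)
                printf("sector %d: ECC at OOB bytes %d..%d\n",
                       section, section * sas + 6, section * sas + 8);
        return 0;
}
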
1197 .ecc = brcmnand_hamming_ooblayout_ecc,
1206 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_bch_ooblayout_ecc()
1207 int sas = cfg->spare_area_size << cfg->sector_size_1k; in brcmnand_bch_ooblayout_ecc()
1208 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); in brcmnand_bch_ooblayout_ecc()
1211 return -ERANGE; in brcmnand_bch_ooblayout_ecc()
1213 oobregion->offset = ((section + 1) * sas) - chip->ecc.bytes; in brcmnand_bch_ooblayout_ecc()
1214 oobregion->length = chip->ecc.bytes; in brcmnand_bch_ooblayout_ecc()
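
The BCH layout above instead places the ECC at the tail of each sector's spare chunk. With BCH-4 on 512-byte sectors and sas = 16, chip->ecc.bytes is 7 (DIV_ROUND_UP(4 * 14, 8), see brcmstb_choose_ecc_layout() below), so the ECC spans OOB bytes 9..15 of each chunk:

#include <stdio.h>

int main(void)
{
        int sas = 16, ecc_bytes = 7, sectors = 4, section;

        for (section = 0; section < sectors; section++)
                printf("sector %d: ECC at OOB bytes %d..%d\n", section,
                       (section + 1) * sas - ecc_bytes,
                       (section + 1) * sas - 1);
        return 0;
}
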
1224 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_bch_ooblayout_free_lp()
1225 int sas = cfg->spare_area_size << cfg->sector_size_1k; in brcmnand_bch_ooblayout_free_lp()
1226 int sectors = cfg->page_size / (512 << cfg->sector_size_1k); in brcmnand_bch_ooblayout_free_lp()
1229 return -ERANGE; in brcmnand_bch_ooblayout_free_lp()
1231 if (sas <= chip->ecc.bytes) in brcmnand_bch_ooblayout_free_lp()
1234 oobregion->offset = section * sas; in brcmnand_bch_ooblayout_free_lp()
1235 oobregion->length = sas - chip->ecc.bytes; in brcmnand_bch_ooblayout_free_lp()
1238 oobregion->offset++; in brcmnand_bch_ooblayout_free_lp()
1239 oobregion->length--; in brcmnand_bch_ooblayout_free_lp()
1250 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_bch_ooblayout_free_sp()
1251 int sas = cfg->spare_area_size << cfg->sector_size_1k; in brcmnand_bch_ooblayout_free_sp()
1253 if (section > 1 || sas - chip->ecc.bytes < 6 || in brcmnand_bch_ooblayout_free_sp()
1254 (section && sas - chip->ecc.bytes == 6)) in brcmnand_bch_ooblayout_free_sp()
1255 return -ERANGE; in brcmnand_bch_ooblayout_free_sp()
1258 oobregion->offset = 0; in brcmnand_bch_ooblayout_free_sp()
1259 oobregion->length = 5; in brcmnand_bch_ooblayout_free_sp()
1261 oobregion->offset = 6; in brcmnand_bch_ooblayout_free_sp()
1262 oobregion->length = sas - chip->ecc.bytes - 6; in brcmnand_bch_ooblayout_free_sp()
1269 .ecc = brcmnand_bch_ooblayout_ecc,
1274 .ecc = brcmnand_bch_ooblayout_ecc,
1280 struct brcmnand_cfg *p = &host->hwcfg; in brcmstb_choose_ecc_layout()
1281 struct mtd_info *mtd = nand_to_mtd(&host->chip); in brcmstb_choose_ecc_layout()
1282 struct nand_ecc_ctrl *ecc = &host->chip.ecc; in brcmstb_choose_ecc_layout() local
1283 unsigned int ecc_level = p->ecc_level; in brcmstb_choose_ecc_layout()
1284 int sas = p->spare_area_size << p->sector_size_1k; in brcmstb_choose_ecc_layout()
1285 int sectors = p->page_size / (512 << p->sector_size_1k); in brcmstb_choose_ecc_layout()
1287 if (p->sector_size_1k) in brcmstb_choose_ecc_layout()
1290 if (is_hamming_ecc(host->ctrl, p)) { in brcmstb_choose_ecc_layout()
1291 ecc->bytes = 3 * sectors; in brcmstb_choose_ecc_layout()
1302 ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8); in brcmstb_choose_ecc_layout()
1303 if (p->page_size == 512) in brcmstb_choose_ecc_layout()
1308 if (ecc->bytes >= sas) { in brcmstb_choose_ecc_layout()
1309 dev_err(&host->pdev->dev, in brcmstb_choose_ecc_layout()
1310 "error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n", in brcmstb_choose_ecc_layout()
1311 ecc->bytes, sas); in brcmstb_choose_ecc_layout()
1312 return -EINVAL; in brcmstb_choose_ecc_layout()
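
The DIV_ROUND_UP(ecc_level * 14, 8) above encodes 14 parity bits per bit of BCH correction strength on this controller. Worked check, mirroring the error path above: BCH-8 needs ceil(8 * 14 / 8) = 14 ECC bytes and fits a 16-byte spare sector, while BCH-12 would need 21 and fail.

/* Does a BCH level fit the per-sector spare area? (sketch) */
static int ex_ecc_fits(unsigned int ecc_level, unsigned int sas)
{
        unsigned int ecc_bytes = (ecc_level * 14 + 7) / 8; /* DIV_ROUND_UP */

        return ecc_bytes < sas;
}
/* ex_ecc_fits(8, 16) == 1, ex_ecc_fits(12, 16) == 0 */
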
1322 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_wp()
1324 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { in brcmnand_wp()
1325 static int old_wp = -1; in brcmnand_wp()
1329 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); in brcmnand_wp()
1356 dev_err_ratelimited(&host->pdev->dev, in brcmnand_wp()
1367 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE]; in oob_reg_read()
1368 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE]; in oob_reg_read()
1370 if (offs >= ctrl->max_oob) in oob_reg_read()
1374 reg_offs = offset10 + ((offs - 0x10) & ~0x03); in oob_reg_read()
1378 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3)); in oob_reg_read()
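
The final shift in oob_reg_read() above picks one byte out of a big-endian-packed 32-bit OOB register: byte 0 is bits 31:24, byte 1 bits 23:16, and so on. Standalone sketch:

#include <stdint.h>

static uint8_t ex_oob_byte(uint32_t reg, unsigned int offs)
{
        /* offs & 0x03 selects the byte within the word, MSB first */
        return (uint8_t)(reg >> (24 - ((offs & 0x03) << 3)));
}
/* ex_oob_byte(0xaabbccdd, 0) == 0xaa, ex_oob_byte(0xaabbccdd, 3) == 0xdd */
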
1386 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE]; in oob_reg_write()
1387 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE]; in oob_reg_write()
1389 if (offs >= ctrl->max_oob) in oob_reg_write()
1393 reg_offs = offset10 + ((offs - 0x10) & ~0x03); in oob_reg_write()
1401 * read_oob_from_regs - read data from OOB registers
1403 * @i: sub-page sector index
1416 tbytes = max(0, tbytes - (int)ctrl->max_oob); in read_oob_from_regs()
1417 tbytes = min_t(int, tbytes, ctrl->max_oob); in read_oob_from_regs()
1425 * write_oob_to_regs - write data to OOB registers
1426 * @i: sub-page sector index
1439 tbytes = max(0, tbytes - (int)ctrl->max_oob); in write_oob_to_regs()
1440 tbytes = min_t(int, tbytes, ctrl->max_oob); in write_oob_to_regs()
1468 if (ctrl->edu_count) { in brcmnand_edu_irq()
1469 ctrl->edu_count--; in brcmnand_edu_irq()
1476 if (ctrl->edu_count) { in brcmnand_edu_irq()
1477 ctrl->edu_dram_addr += FC_BYTES; in brcmnand_edu_irq()
1478 ctrl->edu_ext_addr += FC_BYTES; in brcmnand_edu_irq()
1480 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr); in brcmnand_edu_irq()
1482 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr); in brcmnand_edu_irq()
1485 if (ctrl->oob) { in brcmnand_edu_irq()
1486 if (ctrl->edu_cmd == EDU_CMD_READ) { in brcmnand_edu_irq()
1487 ctrl->oob += read_oob_from_regs(ctrl, in brcmnand_edu_irq()
1488 ctrl->edu_count + 1, in brcmnand_edu_irq()
1489 ctrl->oob, ctrl->sas, in brcmnand_edu_irq()
1490 ctrl->sector_size_1k); in brcmnand_edu_irq()
1493 ctrl->edu_ext_addr); in brcmnand_edu_irq()
1495 ctrl->oob += write_oob_to_regs(ctrl, in brcmnand_edu_irq()
1496 ctrl->edu_count, in brcmnand_edu_irq()
1497 ctrl->oob, ctrl->sas, in brcmnand_edu_irq()
1498 ctrl->sector_size_1k); in brcmnand_edu_irq()
1503 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd); in brcmnand_edu_irq()
1509 complete(&ctrl->edu_done); in brcmnand_edu_irq()
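
The interrupt handler above advances one flash-cache chunk per completion and re-issues the EDU command until edu_count drains, so each page transfer raises one interrupt per sub-transfer. A sketch of the per-IRQ bookkeeping, assuming FC_BYTES == 512 (the flash-cache size on these controllers):

#include <stdint.h>

#define EX_FC_BYTES 512 /* one flash-cache transfer */

/* Returns 1 if another EDU command should be issued, 0 when done. */
static int ex_edu_step(uint64_t *dram, uint64_t *flash, unsigned int *count)
{
        if (!--(*count))
                return 0;       /* last chunk: complete(&ctrl->edu_done) */
        *dram += EX_FC_BYTES;
        *flash += EX_FC_BYTES;
        return 1;               /* re-issue ctrl->edu_cmd */
}
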
1519 if (ctrl->dma_pending) in brcmnand_ctlrdy_irq()
1523 if (ctrl->edu_pending) { in brcmnand_ctlrdy_irq()
1524 if (irq == ctrl->irq && ((int)ctrl->edu_irq >= 0)) in brcmnand_ctlrdy_irq()
1532 complete(&ctrl->done); in brcmnand_ctlrdy_irq()
1536 /* Handle SoC-specific interrupt hardware */
1541 if (ctrl->soc->ctlrdy_ack(ctrl->soc)) in brcmnand_irq()
1551 complete(&ctrl->dma_done); in brcmnand_dma_irq()
1558 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_send_cmd()
1564 dev_dbg(ctrl->dev, "send native cmd %d addr 0x%llx\n", cmd, cmd_addr); in brcmnand_send_cmd()
1566 BUG_ON(ctrl->cmd_pending != 0); in brcmnand_send_cmd()
1567 ctrl->cmd_pending = cmd; in brcmnand_send_cmd()
1590 struct brcmnand_controller *ctrl = host->ctrl; in brcmstb_nand_wait_for_completion()
1595 if (mtd->oops_panic_write) { in brcmstb_nand_wait_for_completion()
1605 sts = wait_for_completion_timeout(&ctrl->done, timeo); in brcmstb_nand_wait_for_completion()
1615 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_waitfunc()
1618 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); in brcmnand_waitfunc()
1619 if (ctrl->cmd_pending) in brcmnand_waitfunc()
1626 dev_err_ratelimited(ctrl->dev, in brcmnand_waitfunc()
1628 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n", in brcmnand_waitfunc()
1631 ctrl->cmd_pending = 0; in brcmnand_waitfunc()
1650 struct nand_chip *chip = &host->chip; in brcmnand_low_level_op()
1651 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_low_level_op()
1676 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp); in brcmnand_low_level_op()
1690 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_cmdfunc()
1691 u64 addr = (u64)page_addr << chip->page_shift; in brcmnand_cmdfunc()
1697 /* Avoid propagating a negative, don't-care address */ in brcmnand_cmdfunc()
1701 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, in brcmnand_cmdfunc()
1704 host->last_cmd = command; in brcmnand_cmdfunc()
1705 host->last_byte = 0; in brcmnand_cmdfunc()
1706 host->last_addr = addr; in brcmnand_cmdfunc()
1735 addr &= ~((u64)(FC_BYTES - 1)); in brcmnand_cmdfunc()
1741 host->hwcfg.sector_size_1k = in brcmnand_cmdfunc()
1757 /* Copy flash cache word-wise */ in brcmnand_cmdfunc()
1758 u32 *flash_cache = (u32 *)ctrl->flash_cache; in brcmnand_cmdfunc()
1761 brcmnand_soc_data_bus_prepare(ctrl->soc, true); in brcmnand_cmdfunc()
1774 brcmnand_soc_data_bus_unprepare(ctrl->soc, true); in brcmnand_cmdfunc()
1777 if (host->hwcfg.sector_size_1k) in brcmnand_cmdfunc()
1779 host->hwcfg.sector_size_1k); in brcmnand_cmdfunc()
1782 /* Re-enabling protection is necessary only after erase */ in brcmnand_cmdfunc()
1790 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read_byte()
1794 switch (host->last_cmd) { in brcmnand_read_byte()
1796 if (host->last_byte < 4) in brcmnand_read_byte()
1798 (24 - (host->last_byte << 3)); in brcmnand_read_byte()
1799 else if (host->last_byte < 8) in brcmnand_read_byte()
1801 (56 - (host->last_byte << 3)); in brcmnand_read_byte()
1805 ret = oob_reg_read(ctrl, host->last_byte); in brcmnand_read_byte()
1817 addr = host->last_addr + host->last_byte; in brcmnand_read_byte()
1818 offs = addr & (FC_BYTES - 1); in brcmnand_read_byte()
1821 if (host->last_byte > 0 && offs == 0) in brcmnand_read_byte()
1824 ret = ctrl->flash_cache[offs]; in brcmnand_read_byte()
1827 if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { in brcmnand_read_byte()
1830 bool last = host->last_byte == in brcmnand_read_byte()
1831 ONFI_SUBFEATURE_PARAM_LEN - 1; in brcmnand_read_byte()
1837 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); in brcmnand_read_byte()
1838 host->last_byte++; in brcmnand_read_byte()
1857 switch (host->last_cmd) { in brcmnand_write_buf()
1875 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_edu_trans()
1876 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_edu_trans()
1884 dev_dbg(ctrl->dev, "EDU %s %p:%p\n", ((edu_cmd == EDU_CMD_READ) ? in brcmnand_edu_trans()
1887 pa = dma_map_single(ctrl->dev, buf, len, dir); in brcmnand_edu_trans()
1888 if (dma_mapping_error(ctrl->dev, pa)) { in brcmnand_edu_trans()
1889 dev_err(ctrl->dev, "unable to map buffer for EDU DMA\n"); in brcmnand_edu_trans()
1890 return -ENOMEM; in brcmnand_edu_trans()
1893 ctrl->edu_pending = true; in brcmnand_edu_trans()
1894 ctrl->edu_dram_addr = pa; in brcmnand_edu_trans()
1895 ctrl->edu_ext_addr = addr; in brcmnand_edu_trans()
1896 ctrl->edu_cmd = edu_cmd; in brcmnand_edu_trans()
1897 ctrl->edu_count = trans; in brcmnand_edu_trans()
1898 ctrl->sas = cfg->spare_area_size; in brcmnand_edu_trans()
1899 ctrl->oob = oob; in brcmnand_edu_trans()
1901 edu_writel(ctrl, EDU_DRAM_ADDR, (u32)ctrl->edu_dram_addr); in brcmnand_edu_trans()
1903 edu_writel(ctrl, EDU_EXT_ADDR, ctrl->edu_ext_addr); in brcmnand_edu_trans()
1908 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_WRITE)) { in brcmnand_edu_trans()
1910 ctrl->edu_ext_addr); in brcmnand_edu_trans()
1912 ctrl->oob += write_oob_to_regs(ctrl, in brcmnand_edu_trans()
1914 ctrl->oob, ctrl->sas, in brcmnand_edu_trans()
1915 ctrl->sector_size_1k); in brcmnand_edu_trans()
1920 edu_writel(ctrl, EDU_CMD, ctrl->edu_cmd); in brcmnand_edu_trans()
1923 if (wait_for_completion_timeout(&ctrl->edu_done, timeo) <= 0) { in brcmnand_edu_trans()
1924 dev_err(ctrl->dev, in brcmnand_edu_trans()
1930 dma_unmap_single(ctrl->dev, pa, len, dir); in brcmnand_edu_trans()
1933 if (ctrl->oob && (ctrl->edu_cmd == EDU_CMD_READ)) { in brcmnand_edu_trans()
1934 ctrl->oob += read_oob_from_regs(ctrl, in brcmnand_edu_trans()
1936 ctrl->oob, ctrl->sas, in brcmnand_edu_trans()
1937 ctrl->sector_size_1k); in brcmnand_edu_trans()
1944 dev_info(ctrl->dev, "program failed at %llx\n", in brcmnand_edu_trans()
1946 ret = -EIO; in brcmnand_edu_trans()
1951 dev_warn(ctrl->dev, "EDU still active: %#x\n", in brcmnand_edu_trans()
1955 dev_warn(ctrl->dev, "EDU RBUS error at addr %llx\n", in brcmnand_edu_trans()
1957 ret = -EIO; in brcmnand_edu_trans()
1960 ctrl->edu_pending = false; in brcmnand_edu_trans()
1969 * check for ECC errors here; subpage ECC errors are in brcmnand_edu_trans()
1970 * retained in the ECC error address register in brcmnand_edu_trans()
1976 ret = -EUCLEAN; in brcmnand_edu_trans()
1978 ret = -EBADMSG; in brcmnand_edu_trans()
1987 * - Is this descriptor the beginning or end of a linked list?
1988 * - What is the (DMA) address of the next descriptor in the linked list?
1998 desc->next_desc = lower_32_bits(next_desc); in brcmnand_fill_dma_desc()
1999 desc->next_desc_ext = upper_32_bits(next_desc); in brcmnand_fill_dma_desc()
2000 desc->cmd_irq = (dma_cmd << 24) | in brcmnand_fill_dma_desc()
2004 desc->cmd_irq |= 0x01 << 12; in brcmnand_fill_dma_desc()
2006 desc->dram_addr = lower_32_bits(buf); in brcmnand_fill_dma_desc()
2007 desc->dram_addr_ext = upper_32_bits(buf); in brcmnand_fill_dma_desc()
2008 desc->tfr_len = len; in brcmnand_fill_dma_desc()
2009 desc->total_len = len; in brcmnand_fill_dma_desc()
2010 desc->flash_addr = lower_32_bits(addr); in brcmnand_fill_dma_desc()
2011 desc->flash_addr_ext = upper_32_bits(addr); in brcmnand_fill_dma_desc()
2012 desc->cs = host->cs; in brcmnand_fill_dma_desc()
2013 desc->status_valid = 0x01; in brcmnand_fill_dma_desc()
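
Each descriptor field is 32 bits wide, so the fill routine above stores the 64-bit link, buffer and flash addresses as lower/upper halves (lower_32_bits()/upper_32_bits()). Minimal sketch of that split for the linked-list and buffer fields:

#include <stdint.h>

struct ex_desc {
        uint32_t next_desc, next_desc_ext;      /* 64-bit link, split */
        uint32_t dram_addr, dram_addr_ext;      /* 64-bit buffer, split */
};

static void ex_link(struct ex_desc *d, uint64_t next_pa, uint64_t buf_pa)
{
        d->next_desc     = (uint32_t)next_pa;           /* lower_32_bits */
        d->next_desc_ext = (uint32_t)(next_pa >> 32);   /* upper_32_bits */
        d->dram_addr     = (uint32_t)buf_pa;
        d->dram_addr_ext = (uint32_t)(buf_pa >> 32);
}
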
2022 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_dma_run()
2027 if (ctrl->nand_version > 0x0602) { in brcmnand_dma_run()
2034 ctrl->dma_pending = true; in brcmnand_dma_run()
2038 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) { in brcmnand_dma_run()
2039 dev_err(ctrl->dev, in brcmnand_dma_run()
2044 ctrl->dma_pending = false; in brcmnand_dma_run()
2051 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_dma_trans()
2055 buf_pa = dma_map_single(ctrl->dev, buf, len, dir); in brcmnand_dma_trans()
2056 if (dma_mapping_error(ctrl->dev, buf_pa)) { in brcmnand_dma_trans()
2057 dev_err(ctrl->dev, "unable to map buffer for DMA\n"); in brcmnand_dma_trans()
2058 return -ENOMEM; in brcmnand_dma_trans()
2061 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len, in brcmnand_dma_trans()
2064 brcmnand_dma_run(host, ctrl->dma_pa); in brcmnand_dma_trans()
2066 dma_unmap_single(ctrl->dev, buf_pa, len, dir); in brcmnand_dma_trans()
2068 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR) in brcmnand_dma_trans()
2069 return -EBADMSG; in brcmnand_dma_trans()
2070 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR) in brcmnand_dma_trans()
2071 return -EUCLEAN; in brcmnand_dma_trans()
2084 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read_by_pio()
2091 /* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */ in brcmnand_read_by_pio()
2096 brcmnand_soc_data_bus_prepare(ctrl->soc, false); in brcmnand_read_by_pio()
2101 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); in brcmnand_read_by_pio()
2106 mtd->oobsize / trans, in brcmnand_read_by_pio()
2107 host->hwcfg.sector_size_1k); in brcmnand_read_by_pio()
2113 ret = -EBADMSG; in brcmnand_read_by_pio()
2120 ret = -EUCLEAN; in brcmnand_read_by_pio()
2128 * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
2131 * Because the HW ECC signals an ECC error if an erased page has even a single
2132 * bitflip, we must check each ECC error to see if it is actually an erased
2135 * On a real error, return a negative error code (-EBADMSG for ECC error), and
2138 * bitflips-per-ECC-sector to the caller.
2144 struct mtd_oob_region ecc; in brcmstb_nand_verify_erased_page() local
2147 int page = addr >> chip->page_shift; in brcmstb_nand_verify_erased_page()
2155 /* read without ecc for verification */ in brcmstb_nand_verify_erased_page()
2156 ret = chip->ecc.read_page_raw(chip, buf, true, page); in brcmstb_nand_verify_erased_page()
2160 for (i = 0; i < chip->ecc.steps; i++) { in brcmstb_nand_verify_erased_page()
2161 ecc_chunk = buf + chip->ecc.size * i; in brcmstb_nand_verify_erased_page()
2163 mtd_ooblayout_ecc(mtd, i, &ecc); in brcmstb_nand_verify_erased_page()
2164 ecc_bytes = chip->oob_poi + ecc.offset; in brcmstb_nand_verify_erased_page()
2166 ret = nand_check_erased_ecc_chunk(ecc_chunk, chip->ecc.size, in brcmstb_nand_verify_erased_page()
2167 ecc_bytes, ecc.length, in brcmstb_nand_verify_erased_page()
2169 chip->ecc.strength); in brcmstb_nand_verify_erased_page()
2183 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read()
2189 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); in brcmnand_read()
2194 if (ctrl->dma_trans && (has_edu(ctrl) || !oob) && in brcmnand_read()
2196 err = ctrl->dma_trans(host, addr, buf, oob, in brcmnand_read()
2204 return -EIO; in brcmnand_read()
2212 memset(oob, 0x99, mtd->oobsize); in brcmnand_read()
2227 if ((ctrl->nand_version == 0x0700) || in brcmnand_read()
2228 (ctrl->nand_version == 0x0701)) { in brcmnand_read()
2239 if (ctrl->nand_version < 0x0702) { in brcmnand_read()
2247 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", in brcmnand_read()
2249 mtd->ecc_stats.failed++; in brcmnand_read()
2250 /* NAND layer expects zero on ECC errors */ in brcmnand_read()
2262 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", in brcmnand_read()
2264 mtd->ecc_stats.corrected += corrected; in brcmnand_read()
2265 /* Always exceed the software-imposed threshold */ in brcmnand_read()
2266 return max(mtd->bitflip_threshold, corrected); in brcmnand_read()
2277 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; in brcmnand_read_page()
2281 return brcmnand_read(mtd, chip, host->last_addr, in brcmnand_read_page()
2282 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); in brcmnand_read_page()
2290 u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL; in brcmnand_read_page_raw()
2296 ret = brcmnand_read(mtd, chip, host->last_addr, in brcmnand_read_page_raw()
2297 mtd->writesize >> FC_SHIFT, (u32 *)buf, oob); in brcmnand_read_page_raw()
2306 return brcmnand_read(mtd, chip, (u64)page << chip->page_shift, in brcmnand_read_oob()
2307 mtd->writesize >> FC_SHIFT, in brcmnand_read_oob()
2308 NULL, (u8 *)chip->oob_poi); in brcmnand_read_oob()
2317 brcmnand_read(mtd, chip, (u64)page << chip->page_shift, in brcmnand_read_oob_raw()
2318 mtd->writesize >> FC_SHIFT, in brcmnand_read_oob_raw()
2319 NULL, (u8 *)chip->oob_poi); in brcmnand_read_oob_raw()
2328 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_write()
2329 unsigned int i, j, trans = mtd->writesize >> FC_SHIFT; in brcmnand_write()
2332 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf); in brcmnand_write()
2335 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf); in brcmnand_write()
2341 for (i = 0; i < ctrl->max_oob; i += 4) in brcmnand_write()
2344 if (mtd->oops_panic_write) in brcmnand_write()
2349 if (ctrl->dma_trans(host, addr, (u32 *)buf, oob, mtd->writesize, in brcmnand_write()
2352 ret = -EIO; in brcmnand_write()
2362 brcmnand_soc_data_bus_prepare(ctrl->soc, false); in brcmnand_write()
2367 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); in brcmnand_write()
2375 mtd->oobsize / trans, in brcmnand_write()
2376 host->hwcfg.sector_size_1k); in brcmnand_write()
2379 /* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */ in brcmnand_write()
2384 dev_info(ctrl->dev, "program failed at %llx\n", in brcmnand_write()
2386 ret = -EIO; in brcmnand_write()
2400 void *oob = oob_required ? chip->oob_poi : NULL; in brcmnand_write_page()
2403 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); in brcmnand_write_page()
2413 void *oob = oob_required ? chip->oob_poi : NULL; in brcmnand_write_page_raw()
2417 brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob); in brcmnand_write_page_raw()
2426 (u64)page << chip->page_shift, NULL, in brcmnand_write_oob()
2427 chip->oob_poi); in brcmnand_write_oob()
2437 ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL, in brcmnand_write_oob_raw()
2438 (u8 *)chip->oob_poi); in brcmnand_write_oob_raw()
2445 * Per-CS setup (1 NAND device)
2451 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_cfg()
2452 struct nand_chip *chip = &host->chip; in brcmnand_set_cfg()
2453 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_set_cfg()
2454 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_cfg()
2456 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_cfg()
2461 if (ctrl->block_sizes) { in brcmnand_set_cfg()
2464 for (i = 0, found = 0; ctrl->block_sizes[i]; i++) in brcmnand_set_cfg()
2465 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) { in brcmnand_set_cfg()
2470 dev_warn(ctrl->dev, "invalid block size %u\n", in brcmnand_set_cfg()
2471 cfg->block_size); in brcmnand_set_cfg()
2472 return -EINVAL; in brcmnand_set_cfg()
2475 block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE); in brcmnand_set_cfg()
2478 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size && in brcmnand_set_cfg()
2479 cfg->block_size > ctrl->max_block_size)) { in brcmnand_set_cfg()
2480 dev_warn(ctrl->dev, "invalid block size %u\n", in brcmnand_set_cfg()
2481 cfg->block_size); in brcmnand_set_cfg()
2485 if (ctrl->page_sizes) { in brcmnand_set_cfg()
2488 for (i = 0, found = 0; ctrl->page_sizes[i]; i++) in brcmnand_set_cfg()
2489 if (ctrl->page_sizes[i] == cfg->page_size) { in brcmnand_set_cfg()
2494 dev_warn(ctrl->dev, "invalid page size %u\n", in brcmnand_set_cfg()
2495 cfg->page_size); in brcmnand_set_cfg()
2496 return -EINVAL; in brcmnand_set_cfg()
2499 page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE); in brcmnand_set_cfg()
2502 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size && in brcmnand_set_cfg()
2503 cfg->page_size > ctrl->max_page_size)) { in brcmnand_set_cfg()
2504 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size); in brcmnand_set_cfg()
2505 return -EINVAL; in brcmnand_set_cfg()
2508 if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) { in brcmnand_set_cfg()
2509 dev_warn(ctrl->dev, "invalid device size 0x%llx\n", in brcmnand_set_cfg()
2510 (unsigned long long)cfg->device_size); in brcmnand_set_cfg()
2511 return -EINVAL; in brcmnand_set_cfg()
2513 device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE); in brcmnand_set_cfg()
2515 tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) | in brcmnand_set_cfg()
2516 (cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) | in brcmnand_set_cfg()
2517 (cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) | in brcmnand_set_cfg()
2518 (!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) | in brcmnand_set_cfg()
2521 tmp |= (page_size << ctrl->page_size_shift) | in brcmnand_set_cfg()
2534 if (ctrl->nand_version >= 0x0302) { in brcmnand_set_cfg()
2535 tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT; in brcmnand_set_cfg()
2536 tmp |= cfg->spare_area_size; in brcmnand_set_cfg()
2540 brcmnand_set_sector_size_1k(host, cfg->sector_size_1k); in brcmnand_set_cfg()
2542 /* threshold = ceil(BCH-level * 0.75) */ in brcmnand_set_cfg()
2543 brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4)); in brcmnand_set_cfg()
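
DIV_ROUND_UP(strength * 3, 4) implements the ceil(BCH-level * 0.75) rule from the comment: BCH-4 gives 3, BCH-8 gives 6, BCH-15 gives ceil(11.25) = 12. Standalone check:

#include <stdio.h>

static unsigned int ex_corr_thresh(unsigned int strength)
{
        return (strength * 3 + 3) / 4;  /* DIV_ROUND_UP(strength * 3, 4) */
}

int main(void)
{
        printf("%u %u %u\n", ex_corr_thresh(4), ex_corr_thresh(8),
               ex_corr_thresh(15));     /* prints: 3 6 12 */
        return 0;
}
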
2552 "%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit", in brcmnand_print_cfg()
2553 (unsigned long long)cfg->device_size >> 20, in brcmnand_print_cfg()
2554 cfg->block_size >> 10, in brcmnand_print_cfg()
2555 cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size, in brcmnand_print_cfg()
2556 cfg->page_size >= 1024 ? "KiB" : "B", in brcmnand_print_cfg()
2557 cfg->spare_area_size, cfg->device_width); in brcmnand_print_cfg()
2559 /* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */ in brcmnand_print_cfg()
2560 if (is_hamming_ecc(host->ctrl, cfg)) in brcmnand_print_cfg()
2561 sprintf(buf, ", Hamming ECC"); in brcmnand_print_cfg()
2562 else if (cfg->sector_size_1k) in brcmnand_print_cfg()
2563 sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1); in brcmnand_print_cfg()
2565 sprintf(buf, ", BCH-%u", cfg->ecc_level); in brcmnand_print_cfg()
2569 * Minimum number of bytes to address a page. Calculated as:
2570 * roundup(log2(size / page-size) / 8)
2572 * NB: the following does not "round up" for non-power-of-2 'size'; but this is
2577 return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3; in get_blk_adr_bytes()
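
Worked instance of get_blk_adr_bytes(): a 1GiB device (2^30 bytes) with 2KiB pages (2^11) leaves 19 bits of page address, which the ALIGN rounds up to 24 bits, i.e. 3 address bytes. A standalone equivalent, using GCC-style builtins in place of ilog2():

#include <stdint.h>

static unsigned int ex_blk_adr_bytes(uint64_t size, uint32_t writesize)
{
        unsigned int bits = (63 - __builtin_clzll(size)) -
                            (31 - __builtin_clz(writesize));

        return ((bits + 7) & ~7u) >> 3; /* ALIGN(bits, 8) >> 3 */
}
/* ex_blk_adr_bytes(1ULL << 30, 2048) == 3 */
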
2582 struct mtd_info *mtd = nand_to_mtd(&host->chip); in brcmnand_setup_dev()
2583 struct nand_chip *chip = &host->chip; in brcmnand_setup_dev()
2585 nanddev_get_ecc_requirements(&chip->base); in brcmnand_setup_dev()
2586 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_setup_dev()
2587 struct brcmnand_cfg *cfg = &host->hwcfg; in brcmnand_setup_dev()
2595 "brcm,nand-oob-sector-size", in brcmnand_setup_dev()
2598 /* Use detected size */ in brcmnand_setup_dev()
2599 cfg->spare_area_size = mtd->oobsize / in brcmnand_setup_dev()
2600 (mtd->writesize >> FC_SHIFT); in brcmnand_setup_dev()
2602 cfg->spare_area_size = oob_sector; in brcmnand_setup_dev()
2604 if (cfg->spare_area_size > ctrl->max_oob) in brcmnand_setup_dev()
2605 cfg->spare_area_size = ctrl->max_oob; in brcmnand_setup_dev()
2610 mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT); in brcmnand_setup_dev()
2612 cfg->device_size = mtd->size; in brcmnand_setup_dev()
2613 cfg->block_size = mtd->erasesize; in brcmnand_setup_dev()
2614 cfg->page_size = mtd->writesize; in brcmnand_setup_dev()
2615 cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8; in brcmnand_setup_dev()
2616 cfg->col_adr_bytes = 2; in brcmnand_setup_dev()
2617 cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize); in brcmnand_setup_dev()
2619 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST) { in brcmnand_setup_dev()
2620 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", in brcmnand_setup_dev()
2621 chip->ecc.engine_type); in brcmnand_setup_dev()
2622 return -EINVAL; in brcmnand_setup_dev()
2625 if (chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN) { in brcmnand_setup_dev()
2626 if (chip->ecc.strength == 1 && chip->ecc.size == 512) in brcmnand_setup_dev()
2627 /* Default to Hamming for 1-bit ECC, if unspecified */ in brcmnand_setup_dev()
2628 chip->ecc.algo = NAND_ECC_ALGO_HAMMING; in brcmnand_setup_dev()
2631 chip->ecc.algo = NAND_ECC_ALGO_BCH; in brcmnand_setup_dev()
2634 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING && in brcmnand_setup_dev()
2635 (chip->ecc.strength != 1 || chip->ecc.size != 512)) { in brcmnand_setup_dev()
2636 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", in brcmnand_setup_dev()
2637 chip->ecc.strength, chip->ecc.size); in brcmnand_setup_dev()
2638 return -EINVAL; in brcmnand_setup_dev()
2641 if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE && in brcmnand_setup_dev()
2642 (!chip->ecc.size || !chip->ecc.strength)) { in brcmnand_setup_dev()
2643 if (requirements->step_size && requirements->strength) { in brcmnand_setup_dev()
2644 /* use detected ECC parameters */ in brcmnand_setup_dev()
2645 chip->ecc.size = requirements->step_size; in brcmnand_setup_dev()
2646 chip->ecc.strength = requirements->strength; in brcmnand_setup_dev()
2647 dev_info(ctrl->dev, "Using ECC step-size %d, strength %d\n", in brcmnand_setup_dev()
2648 chip->ecc.size, chip->ecc.strength); in brcmnand_setup_dev()
2652 switch (chip->ecc.size) { in brcmnand_setup_dev()
2654 if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING) in brcmnand_setup_dev()
2655 cfg->ecc_level = 15; in brcmnand_setup_dev()
2657 cfg->ecc_level = chip->ecc.strength; in brcmnand_setup_dev()
2658 cfg->sector_size_1k = 0; in brcmnand_setup_dev()
2661 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) { in brcmnand_setup_dev()
2662 dev_err(ctrl->dev, "1KB sectors not supported\n"); in brcmnand_setup_dev()
2663 return -EINVAL; in brcmnand_setup_dev()
2665 if (chip->ecc.strength & 0x1) { in brcmnand_setup_dev()
2666 dev_err(ctrl->dev, in brcmnand_setup_dev()
2667 "odd ECC not supported with 1KB sectors\n"); in brcmnand_setup_dev()
2668 return -EINVAL; in brcmnand_setup_dev()
2671 cfg->ecc_level = chip->ecc.strength >> 1; in brcmnand_setup_dev()
2672 cfg->sector_size_1k = 1; in brcmnand_setup_dev()
2675 dev_err(ctrl->dev, "unsupported ECC size: %d\n", in brcmnand_setup_dev()
2676 chip->ecc.size); in brcmnand_setup_dev()
2677 return -EINVAL; in brcmnand_setup_dev()
2680 cfg->ful_adr_bytes = cfg->blk_adr_bytes; in brcmnand_setup_dev()
2681 if (mtd->writesize > 512) in brcmnand_setup_dev()
2682 cfg->ful_adr_bytes += cfg->col_adr_bytes; in brcmnand_setup_dev()
2684 cfg->ful_adr_bytes += 1; in brcmnand_setup_dev()
2693 dev_info(ctrl->dev, "detected %s\n", msg); in brcmnand_setup_dev()
2696 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); in brcmnand_setup_dev()
2701 /* We need to turn on Read from erased pages protected by ECC */ in brcmnand_setup_dev()
2702 if (ctrl->nand_version >= 0x0702) in brcmnand_setup_dev()
2705 if (ctrl->features & BRCMNAND_HAS_PREFETCH) in brcmnand_setup_dev()
2719 chip->options |= NAND_NO_SUBPAGE_WRITE; in brcmnand_attach_chip()
2725 chip->options |= NAND_USES_DMA; in brcmnand_attach_chip()
2727 if (chip->bbt_options & NAND_BBT_USE_FLASH) in brcmnand_attach_chip()
2728 chip->bbt_options |= NAND_BBT_NO_OOB; in brcmnand_attach_chip()
2731 return -ENXIO; in brcmnand_attach_chip()
2733 chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512; in brcmnand_attach_chip()
2735 /* only use our internal HW threshold */ in brcmnand_attach_chip()
2736 mtd->bitflip_threshold = 1; in brcmnand_attach_chip()
2740 /* If OOB is written with ECC enabled it will cause ECC errors */ in brcmnand_attach_chip()
2741 if (is_hamming_ecc(host->ctrl, &host->hwcfg)) { in brcmnand_attach_chip()
2742 chip->ecc.write_oob = brcmnand_write_oob_raw; in brcmnand_attach_chip()
2743 chip->ecc.read_oob = brcmnand_read_oob_raw; in brcmnand_attach_chip()
2755 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_init_cs()
2756 struct platform_device *pdev = host->pdev; in brcmnand_init_cs()
2762 ret = of_property_read_u32(dn, "reg", &host->cs); in brcmnand_init_cs()
2764 dev_err(&pdev->dev, "can't get chip-select\n"); in brcmnand_init_cs()
2765 return -ENXIO; in brcmnand_init_cs()
2768 mtd = nand_to_mtd(&host->chip); in brcmnand_init_cs()
2769 chip = &host->chip; in brcmnand_init_cs()
2773 mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d", in brcmnand_init_cs()
2774 host->cs); in brcmnand_init_cs()
2775 if (!mtd->name) in brcmnand_init_cs()
2776 return -ENOMEM; in brcmnand_init_cs()
2778 mtd->owner = THIS_MODULE; in brcmnand_init_cs()
2779 mtd->dev.parent = &pdev->dev; in brcmnand_init_cs()
2781 chip->legacy.cmd_ctrl = brcmnand_cmd_ctrl; in brcmnand_init_cs()
2782 chip->legacy.cmdfunc = brcmnand_cmdfunc; in brcmnand_init_cs()
2783 chip->legacy.waitfunc = brcmnand_waitfunc; in brcmnand_init_cs()
2784 chip->legacy.read_byte = brcmnand_read_byte; in brcmnand_init_cs()
2785 chip->legacy.read_buf = brcmnand_read_buf; in brcmnand_init_cs()
2786 chip->legacy.write_buf = brcmnand_write_buf; in brcmnand_init_cs()
2788 chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; in brcmnand_init_cs()
2789 chip->ecc.read_page = brcmnand_read_page; in brcmnand_init_cs()
2790 chip->ecc.write_page = brcmnand_write_page; in brcmnand_init_cs()
2791 chip->ecc.read_page_raw = brcmnand_read_page_raw; in brcmnand_init_cs()
2792 chip->ecc.write_page_raw = brcmnand_write_page_raw; in brcmnand_init_cs()
2793 chip->ecc.write_oob_raw = brcmnand_write_oob_raw; in brcmnand_init_cs()
2794 chip->ecc.read_oob_raw = brcmnand_read_oob_raw; in brcmnand_init_cs()
2795 chip->ecc.read_oob = brcmnand_read_oob; in brcmnand_init_cs()
2796 chip->ecc.write_oob = brcmnand_write_oob; in brcmnand_init_cs()
2798 chip->controller = &ctrl->controller; in brcmnand_init_cs()
2805 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_init_cs()
2823 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_save_restore_cs_config()
2824 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_save_restore_cs_config()
2825 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_save_restore_cs_config()
2827 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_save_restore_cs_config()
2829 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1); in brcmnand_save_restore_cs_config()
2830 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2); in brcmnand_save_restore_cs_config()
2833 nand_writereg(ctrl, cfg_offs, host->hwcfg.config); in brcmnand_save_restore_cs_config()
2836 host->hwcfg.config_ext); in brcmnand_save_restore_cs_config()
2837 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control); in brcmnand_save_restore_cs_config()
2838 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1); in brcmnand_save_restore_cs_config()
2839 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2); in brcmnand_save_restore_cs_config()
2841 host->hwcfg.config = nand_readreg(ctrl, cfg_offs); in brcmnand_save_restore_cs_config()
2843 host->hwcfg.config_ext = in brcmnand_save_restore_cs_config()
2845 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs); in brcmnand_save_restore_cs_config()
2846 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs); in brcmnand_save_restore_cs_config()
2847 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs); in brcmnand_save_restore_cs_config()
2856 list_for_each_entry(host, &ctrl->host_list, node) in brcmnand_suspend()
2859 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT); in brcmnand_suspend()
2860 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR); in brcmnand_suspend()
2861 ctrl->corr_stat_threshold = in brcmnand_suspend()
2865 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE); in brcmnand_suspend()
2867 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG); in brcmnand_suspend()
2878 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode); in brcmnand_resume()
2883 ctrl->edu_config = edu_readl(ctrl, EDU_CONFIG); in brcmnand_resume()
2884 edu_writel(ctrl, EDU_CONFIG, ctrl->edu_config); in brcmnand_resume()
2889 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select); in brcmnand_resume()
2890 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor); in brcmnand_resume()
2892 ctrl->corr_stat_threshold); in brcmnand_resume()
2893 if (ctrl->soc) { in brcmnand_resume()
2894 /* Clear/re-enable interrupt */ in brcmnand_resume()
2895 ctrl->soc->ctlrdy_ack(ctrl->soc); in brcmnand_resume()
2896 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); in brcmnand_resume()
2899 list_for_each_entry(host, &ctrl->host_list, node) { in brcmnand_resume()
2900 struct nand_chip *chip = &host->chip; in brcmnand_resume()
2904 /* Reset the chip, required by some chips after power-up */ in brcmnand_resume()
2918 { .compatible = "brcm,brcmnand-v2.1" },
2919 { .compatible = "brcm,brcmnand-v2.2" },
2920 { .compatible = "brcm,brcmnand-v4.0" },
2921 { .compatible = "brcm,brcmnand-v5.0" },
2922 { .compatible = "brcm,brcmnand-v6.0" },
2923 { .compatible = "brcm,brcmnand-v6.1" },
2924 { .compatible = "brcm,brcmnand-v6.2" },
2925 { .compatible = "brcm,brcmnand-v7.0" },
2926 { .compatible = "brcm,brcmnand-v7.1" },
2927 { .compatible = "brcm,brcmnand-v7.2" },
2928 { .compatible = "brcm,brcmnand-v7.3" },
2938 struct device *dev = &pdev->dev; in brcmnand_edu_setup()
2939 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); in brcmnand_edu_setup()
2943 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-edu"); in brcmnand_edu_setup()
2945 ctrl->edu_base = devm_ioremap_resource(dev, res); in brcmnand_edu_setup()
2946 if (IS_ERR(ctrl->edu_base)) in brcmnand_edu_setup()
2947 return PTR_ERR(ctrl->edu_base); in brcmnand_edu_setup()
2949 ctrl->edu_offsets = edu_regs; in brcmnand_edu_setup()
2958 ctrl->edu_irq = platform_get_irq_optional(pdev, 1); in brcmnand_edu_setup()
2959 if (ctrl->edu_irq < 0) { in brcmnand_edu_setup()
2963 ret = devm_request_irq(dev, ctrl->edu_irq, in brcmnand_edu_setup()
2965 "brcmnand-edu", ctrl); in brcmnand_edu_setup()
2967 dev_err(ctrl->dev, "can't allocate IRQ %d: error %d\n", in brcmnand_edu_setup()
2968 ctrl->edu_irq, ret); in brcmnand_edu_setup()
2973 ctrl->edu_irq); in brcmnand_edu_setup()
2982 struct device *dev = &pdev->dev; in brcmnand_probe()
2983 struct device_node *dn = dev->of_node, *child; in brcmnand_probe()
2988 /* We only support device-tree instantiation */ in brcmnand_probe()
2990 return -ENODEV; in brcmnand_probe()
2993 return -ENODEV; in brcmnand_probe()
2997 return -ENOMEM; in brcmnand_probe()
3000 ctrl->dev = dev; in brcmnand_probe()
3002 init_completion(&ctrl->done); in brcmnand_probe()
3003 init_completion(&ctrl->dma_done); in brcmnand_probe()
3004 init_completion(&ctrl->edu_done); in brcmnand_probe()
3005 nand_controller_init(&ctrl->controller); in brcmnand_probe()
3006 ctrl->controller.ops = &brcmnand_controller_ops; in brcmnand_probe()
3007 INIT_LIST_HEAD(&ctrl->host_list); in brcmnand_probe()
3011 ctrl->nand_base = devm_ioremap_resource(dev, res); in brcmnand_probe()
3012 if (IS_ERR(ctrl->nand_base)) in brcmnand_probe()
3013 return PTR_ERR(ctrl->nand_base); in brcmnand_probe()
3016 ctrl->clk = devm_clk_get(dev, "nand"); in brcmnand_probe()
3017 if (!IS_ERR(ctrl->clk)) { in brcmnand_probe()
3018 ret = clk_prepare_enable(ctrl->clk); in brcmnand_probe()
3022 ret = PTR_ERR(ctrl->clk); in brcmnand_probe()
3023 if (ret == -EPROBE_DEFER) in brcmnand_probe()
3026 ctrl->clk = NULL; in brcmnand_probe()
3038 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache"); in brcmnand_probe()
3040 ctrl->nand_fc = devm_ioremap_resource(dev, res); in brcmnand_probe()
3041 if (IS_ERR(ctrl->nand_fc)) { in brcmnand_probe()
3042 ret = PTR_ERR(ctrl->nand_fc); in brcmnand_probe()
3046 ctrl->nand_fc = ctrl->nand_base + in brcmnand_probe()
3047 ctrl->reg_offsets[BRCMNAND_FC_BASE]; in brcmnand_probe()
3051 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma"); in brcmnand_probe()
3053 ctrl->flash_dma_base = devm_ioremap_resource(dev, res); in brcmnand_probe()
3054 if (IS_ERR(ctrl->flash_dma_base)) { in brcmnand_probe()
3055 ret = PTR_ERR(ctrl->flash_dma_base); in brcmnand_probe()
3062 ret = -EIO; in brcmnand_probe()
3063 if (ctrl->nand_version >= 0x0700) in brcmnand_probe()
3064 ret = dma_set_mask_and_coherent(&pdev->dev, in brcmnand_probe()
3067 ret = dma_set_mask_and_coherent(&pdev->dev, in brcmnand_probe()
3072 /* linked-list and stop on error */ in brcmnand_probe()
3077 ctrl->dma_desc = dmam_alloc_coherent(dev, in brcmnand_probe()
3078 sizeof(*ctrl->dma_desc), in brcmnand_probe()
3079 &ctrl->dma_pa, GFP_KERNEL); in brcmnand_probe()
3080 if (!ctrl->dma_desc) { in brcmnand_probe()
3081 ret = -ENOMEM; in brcmnand_probe()
3085 ctrl->dma_irq = platform_get_irq(pdev, 1); in brcmnand_probe()
3086 if ((int)ctrl->dma_irq < 0) { in brcmnand_probe()
3088 ret = -ENODEV; in brcmnand_probe()
3092 ret = devm_request_irq(dev, ctrl->dma_irq, in brcmnand_probe()
3097 ctrl->dma_irq, ret); in brcmnand_probe()
3103 ctrl->dma_trans = brcmnand_dma_trans; in brcmnand_probe()
3111 ctrl->dma_trans = brcmnand_edu_trans; in brcmnand_probe()
3120 if (ctrl->features & BRCMNAND_HAS_WP) { in brcmnand_probe()
3129 ctrl->irq = platform_get_irq(pdev, 0); in brcmnand_probe()
3130 if ((int)ctrl->irq < 0) { in brcmnand_probe()
3132 ret = -ENODEV; in brcmnand_probe()
3141 ctrl->soc = soc; in brcmnand_probe()
3143 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0, in brcmnand_probe()
3147 ctrl->soc->ctlrdy_ack(ctrl->soc); in brcmnand_probe()
3148 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); in brcmnand_probe()
3150 /* Use standard interrupt infrastructure */ in brcmnand_probe()
3151 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0, in brcmnand_probe()
3156 ctrl->irq, ret); in brcmnand_probe()
3167 ret = -ENOMEM; in brcmnand_probe()
3170 host->pdev = pdev; in brcmnand_probe()
3171 host->ctrl = ctrl; in brcmnand_probe()
3176 continue; /* Try all chip-selects */ in brcmnand_probe()
3179 list_add_tail(&host->node, &ctrl->host_list); in brcmnand_probe()
3183 /* No chip-selects could initialize properly */ in brcmnand_probe()
3184 if (list_empty(&ctrl->host_list)) { in brcmnand_probe()
3185 ret = -ENODEV; in brcmnand_probe()
3192 clk_disable_unprepare(ctrl->clk); in brcmnand_probe()
3200 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); in brcmnand_remove()
3205 list_for_each_entry(host, &ctrl->host_list, node) { in brcmnand_remove()
3206 chip = &host->chip; in brcmnand_remove()
3212 clk_disable_unprepare(ctrl->clk); in brcmnand_remove()
3214 dev_set_drvdata(&pdev->dev, NULL); in brcmnand_remove()