120 * CFI Primary Vendor-Specific Extended Query table 1.5
124 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; in cfi_use_status_reg()
127 return extp && extp->MinorVersion >= '5' && in cfi_use_status_reg()
128 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG; in cfi_use_status_reg()
134 struct cfi_private *cfi = map->fldrv_priv; in cfi_check_err_status()
140 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, in cfi_check_err_status()
141 cfi->device_type, NULL); in cfi_check_err_status()
144 /* The error bits are invalid while the chip's busy */ in cfi_check_err_status()
152 pr_err("%s erase operation failed, status %lx\n", in cfi_check_err_status()
153 map->name, chipstatus); in cfi_check_err_status()
155 pr_err("%s program operation failed, status %lx\n", in cfi_check_err_status()
156 map->name, chipstatus); in cfi_check_err_status()
158 pr_err("%s buffer program command aborted, status %lx\n", in cfi_check_err_status()
159 map->name, chipstatus); in cfi_check_err_status()
161 pr_err("%s sector write protected, status %lx\n", in cfi_check_err_status()
162 map->name, chipstatus); in cfi_check_err_status()
186 printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1); in cfi_tell_features()
187 printk(" Address sensitive unlock: %s\n", in cfi_tell_features()
188 (extp->SiliconRevision & 1) ? "Not required" : "Required"); in cfi_tell_features()
190 if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend)) in cfi_tell_features()
191 printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]); in cfi_tell_features()
193 printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend); in cfi_tell_features()
195 if (extp->BlkProt == 0) in cfi_tell_features()
198 printk(" Block protection: %d sectors per group\n", extp->BlkProt); in cfi_tell_features()
201 printk(" Temporary block unprotect: %s\n", in cfi_tell_features()
202 extp->TmpBlkUnprotect ? "Supported" : "Not supported"); in cfi_tell_features()
203 printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot); in cfi_tell_features()
204 printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps); in cfi_tell_features()
205 printk(" Burst mode: %s\n", in cfi_tell_features()
206 extp->BurstMode ? "Supported" : "Not supported"); in cfi_tell_features()
207 if (extp->PageMode == 0) in cfi_tell_features()
210 printk(" Page mode: %d word page\n", extp->PageMode << 2); in cfi_tell_features()
213 extp->VppMin >> 4, extp->VppMin & 0xf); in cfi_tell_features()
215 extp->VppMax >> 4, extp->VppMax & 0xf); in cfi_tell_features()
217 if (extp->TopBottom < ARRAY_SIZE(top_bottom)) in cfi_tell_features()
218 printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]); in cfi_tell_features()
220 printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom); in cfi_tell_features()
228 struct map_info *map = mtd->priv; in fixup_amd_bootblock()
229 struct cfi_private *cfi = map->fldrv_priv; in fixup_amd_bootblock()
230 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; in fixup_amd_bootblock()
231 __u8 major = extp->MajorVersion; in fixup_amd_bootblock()
232 __u8 minor = extp->MinorVersion; in fixup_amd_bootblock()
237 pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n", in fixup_amd_bootblock()
238 map->name, cfi->mfr, cfi->id); in fixup_amd_bootblock()
241 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode. in fixup_amd_bootblock()
245 if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) && in fixup_amd_bootblock()
254 * the 8-bit device ID. in fixup_amd_bootblock()
256 (cfi->mfr == CFI_MFR_MACRONIX)) { in fixup_amd_bootblock()
257 pr_debug("%s: Macronix MX29LV400C with bottom boot block" in fixup_amd_bootblock()
258 " detected\n", map->name); in fixup_amd_bootblock()
259 extp->TopBottom = 2; /* bottom boot */ in fixup_amd_bootblock()
261 if (cfi->id & 0x80) { in fixup_amd_bootblock()
262 printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id); in fixup_amd_bootblock()
263 extp->TopBottom = 3; /* top boot */ in fixup_amd_bootblock()
265 extp->TopBottom = 2; /* bottom boot */ in fixup_amd_bootblock()
268 pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;" in fixup_amd_bootblock()
269 " deduced %s from Device ID\n", map->name, major, minor, in fixup_amd_bootblock()
270 extp->TopBottom == 2 ? "bottom" : "top"); in fixup_amd_bootblock()
278 struct map_info *map = mtd->priv; in fixup_use_write_buffers()
279 struct cfi_private *cfi = map->fldrv_priv; in fixup_use_write_buffers()
281 if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201) in fixup_use_write_buffers()
284 if (cfi->cfiq->BufWriteTimeoutTyp) { in fixup_use_write_buffers()
286 mtd->_write = cfi_amdstd_write_buffers; in fixup_use_write_buffers()
294 struct map_info *map = mtd->priv; in fixup_convert_atmel_pri()
295 struct cfi_private *cfi = map->fldrv_priv; in fixup_convert_atmel_pri()
296 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; in fixup_convert_atmel_pri()
300 memset((char *)extp + 5, 0, sizeof(*extp) - 5); in fixup_convert_atmel_pri()
303 extp->EraseSuspend = 2; in fixup_convert_atmel_pri()
306 if (cfi->id == AT49BV6416) { in fixup_convert_atmel_pri()
308 extp->TopBottom = 3; in fixup_convert_atmel_pri()
310 extp->TopBottom = 2; in fixup_convert_atmel_pri()
313 extp->TopBottom = 2; in fixup_convert_atmel_pri()
315 extp->TopBottom = 3; in fixup_convert_atmel_pri()
319 cfi->cfiq->BufWriteTimeoutTyp = 0; in fixup_convert_atmel_pri()
320 cfi->cfiq->BufWriteTimeoutMax = 0; in fixup_convert_atmel_pri()
326 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read; in fixup_use_secsi()
327 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read; in fixup_use_secsi()
332 struct map_info *map = mtd->priv; in fixup_use_erase_chip()
333 struct cfi_private *cfi = map->fldrv_priv; in fixup_use_erase_chip()
334 if ((cfi->cfiq->NumEraseRegions == 1) && in fixup_use_erase_chip()
335 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) { in fixup_use_erase_chip()
336 mtd->_erase = cfi_amdstd_erase_chip; in fixup_use_erase_chip()
342 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
347 mtd->_lock = cfi_atmel_lock; in fixup_use_atmel_lock()
348 mtd->_unlock = cfi_atmel_unlock; in fixup_use_atmel_lock()
349 mtd->flags |= MTD_POWERUP_LOCK; in fixup_use_atmel_lock()
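
Since these parts come out of reset with every sector locked, a caller has to unlock before the first write. A minimal usage sketch, not taken from this file; mtd_unlock() and mtd_write() are the generic MTD helpers, and the offset/length/buffer parameters are illustrative:

static int write_powerup_locked(struct mtd_info *mtd, loff_t ofs,
				size_t len, const u_char *buf)
{
	size_t retlen;
	int ret;

	/* MTD_POWERUP_LOCK tells users the chip powers up locked */
	if (mtd->flags & MTD_POWERUP_LOCK) {
		ret = mtd_unlock(mtd, ofs, len);
		if (ret)
			return ret;
	}
	return mtd_write(mtd, ofs, len, &retlen, buf);
}
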
354 struct map_info *map = mtd->priv; in fixup_old_sst_eraseregion()
355 struct cfi_private *cfi = map->fldrv_priv; in fixup_old_sst_eraseregion()
359 * sector_erase-size and block_erase-size, although they both operate on the in fixup_old_sst_eraseregion()
361 * sector_erase-size. in fixup_old_sst_eraseregion()
363 cfi->cfiq->NumEraseRegions = 1; in fixup_old_sst_eraseregion()
368 struct map_info *map = mtd->priv; in fixup_sst39vf()
369 struct cfi_private *cfi = map->fldrv_priv; in fixup_sst39vf()
373 cfi->addr_unlock1 = 0x5555; in fixup_sst39vf()
374 cfi->addr_unlock2 = 0x2AAA; in fixup_sst39vf()
379 struct map_info *map = mtd->priv; in fixup_sst39vf_rev_b()
380 struct cfi_private *cfi = map->fldrv_priv; in fixup_sst39vf_rev_b()
384 cfi->addr_unlock1 = 0x555; in fixup_sst39vf_rev_b()
385 cfi->addr_unlock2 = 0x2AA; in fixup_sst39vf_rev_b()
387 cfi->sector_erase_cmd = CMD(0x50); in fixup_sst39vf_rev_b()
392 struct map_info *map = mtd->priv; in fixup_sst38vf640x_sectorsize()
393 struct cfi_private *cfi = map->fldrv_priv; in fixup_sst38vf640x_sectorsize()
401 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff; in fixup_sst38vf640x_sectorsize()
402 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n", in fixup_sst38vf640x_sectorsize()
403 mtd->name); in fixup_sst38vf640x_sectorsize()
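
For reference, these EraseRegionInfo values use the standard CFI geometry encoding: bits 15..0 hold the number of erase blocks minus one, and bits 31..16 hold the block size in 256-byte units (the same encoding that the ((info >> 8) & ~0xff) arithmetic in cfi_amdstd_setup() further down decodes). Checking the value written here: 0x002003ff gives 0x0020 * 256 = 8 KiB per sector and 0x03ff + 1 = 1024 sectors, i.e. 8 MiB total, which is consistent with a 64 Mbit 38VF640x part.
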
408 struct map_info *map = mtd->priv; in fixup_s29gl064n_sectors()
409 struct cfi_private *cfi = map->fldrv_priv; in fixup_s29gl064n_sectors()
411 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { in fixup_s29gl064n_sectors()
412 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; in fixup_s29gl064n_sectors()
413 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", in fixup_s29gl064n_sectors()
414 mtd->name); in fixup_s29gl064n_sectors()
420 struct map_info *map = mtd->priv; in fixup_s29gl032n_sectors()
421 struct cfi_private *cfi = map->fldrv_priv; in fixup_s29gl032n_sectors()
423 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { in fixup_s29gl032n_sectors()
424 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; in fixup_s29gl032n_sectors()
425 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", in fixup_s29gl032n_sectors()
426 mtd->name); in fixup_s29gl032n_sectors()
432 struct map_info *map = mtd->priv; in fixup_s29ns512p_sectors()
433 struct cfi_private *cfi = map->fldrv_priv; in fixup_s29ns512p_sectors()
439 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff; in fixup_s29ns512p_sectors()
440 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", in fixup_s29ns512p_sectors()
441 mtd->name); in fixup_s29ns512p_sectors()
446 struct map_info *map = mtd->priv; in fixup_quirks()
447 struct cfi_private *cfi = map->fldrv_priv; in fixup_quirks()
449 if (cfi->mfr == CFI_MFR_AMD && cfi->id == S29GL064N_MN12) in fixup_quirks()
450 cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA; in fixup_quirks()
453 /* Used to fix CFI-Tables of chips without Extended Query Tables */
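
For orientation, fixups like the ones above are wired up through small tables of struct cfi_fixup entries that cfi_fixup() walks after probing. A minimal sketch, assuming the usual { mfr, id, fixup } layout; CFI_MFR_ANY and CFI_ID_ANY are the real wildcard values, but the table itself is illustrative:

static struct cfi_fixup example_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
	{ 0, 0, NULL }
};

/* typically applied from cfi_cmdset_0002() via cfi_fixup(mtd, example_fixup_table) */
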
503 * to be common.  It seems the device IDs are as
516 if (cfi->mfr == CFI_MFR_SAMSUNG) { in cfi_fixup_major_minor()
517 if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') || in cfi_fixup_major_minor()
518 (extp->MajorVersion == '3' && extp->MinorVersion == '3')) { in cfi_fixup_major_minor()
524 printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu" in cfi_fixup_major_minor()
526 extp->MinorVersion); in cfi_fixup_major_minor()
527 extp->MajorVersion = '1'; in cfi_fixup_major_minor()
534 if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) { in cfi_fixup_major_minor()
535 extp->MajorVersion = '1'; in cfi_fixup_major_minor()
536 extp->MinorVersion = '0'; in cfi_fixup_major_minor()
542 if (cfi->mfr == CFI_MFR_INTEL && in is_m29ew()
543 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) || in is_m29ew()
544 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e))) in is_m29ew()
550 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
553 * Erase Confirm -> Suspend -> Program -> Resume
556 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
562 struct cfi_private *cfi = map->fldrv_priv; in cfi_fixup_m29ew_erase_suspend()
569 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
586 * The DELAY value must be tuned based on the customer's platform.
587 * The maximum value that fixes the problem in all cases is 500us.
590 * We have chosen 500µs because this latency is acceptable.
595 * Resolving the Delay After Resume Issue see Micron TN-13-07 in cfi_fixup_m29ew_delay_after_resume()
596 * Worst case delay must be 500µs but 30-50µs should be ok as well in cfi_fixup_m29ew_delay_after_resume()
599 cfi_udelay(500); in cfi_fixup_m29ew_delay_after_resume()
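
The listing only shows fragments of the two M29EW work-arounds, so here is a sketch of what the dummy-write half amounts to, assuming it is issued just before the Erase Resume command with adr pointing into the suspended sector (the real cfi_fixup_m29ew_erase_suspend() is only partially visible above):

static void m29ew_dummy_write_before_resume(struct map_info *map,
					    struct cfi_private *cfi,
					    unsigned long adr)
{
	/* a harmless read/reset cycle sanitizes the internal state machine */
	if (is_m29ew(cfi))
		map_write(map, CMD(0xF0), adr);
}
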
604 struct cfi_private *cfi = map->fldrv_priv; in cfi_cmdset_0002()
605 struct device_node __maybe_unused *np = map->device_node; in cfi_cmdset_0002()
612 mtd->priv = map; in cfi_cmdset_0002()
613 mtd->type = MTD_NORFLASH; in cfi_cmdset_0002()
616 mtd->_erase = cfi_amdstd_erase_varsize; in cfi_cmdset_0002()
617 mtd->_write = cfi_amdstd_write_words; in cfi_cmdset_0002()
618 mtd->_read = cfi_amdstd_read; in cfi_cmdset_0002()
619 mtd->_sync = cfi_amdstd_sync; in cfi_cmdset_0002()
620 mtd->_suspend = cfi_amdstd_suspend; in cfi_cmdset_0002()
621 mtd->_resume = cfi_amdstd_resume; in cfi_cmdset_0002()
622 mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg; in cfi_cmdset_0002()
623 mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg; in cfi_cmdset_0002()
624 mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info; in cfi_cmdset_0002()
625 mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info; in cfi_cmdset_0002()
626 mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg; in cfi_cmdset_0002()
627 mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg; in cfi_cmdset_0002()
628 mtd->flags = MTD_CAP_NORFLASH; in cfi_cmdset_0002()
629 mtd->name = map->name; in cfi_cmdset_0002()
630 mtd->writesize = 1; in cfi_cmdset_0002()
631 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; in cfi_cmdset_0002()
633 pr_debug("MTD %s(): write buffer size %d\n", __func__, in cfi_cmdset_0002()
634 mtd->writebufsize); in cfi_cmdset_0002()
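
As a sanity check on that computation (values illustrative, not from this listing): MaxBufWriteSize is a log2 field, so a single non-interleaved chip reporting 6 gives a 1 << 6 = 64-byte write buffer, and two interleaved chips reporting the same value would yield 128 bytes here.
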
636 mtd->_panic_write = cfi_amdstd_panic_write; in cfi_cmdset_0002()
637 mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot; in cfi_cmdset_0002()
639 if (cfi->cfi_mode==CFI_MODE_CFI){ in cfi_cmdset_0002()
641 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR; in cfi_cmdset_0002()
647 * It's a real CFI chip, not one for which the probe in cfi_cmdset_0002()
654 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19 in cfi_cmdset_0002()
656 * http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf in cfi_cmdset_0002()
659 if (extp->MajorVersion != '1' || in cfi_cmdset_0002()
660 (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) { in cfi_cmdset_0002()
663 extp->MajorVersion, extp->MinorVersion, in cfi_cmdset_0002()
664 extp->MajorVersion, extp->MinorVersion); in cfi_cmdset_0002()
671 extp->MajorVersion, extp->MinorVersion); in cfi_cmdset_0002()
674 cfi->cmdset_priv = extp; in cfi_cmdset_0002()
686 np, "use-advanced-sector-protection") in cfi_cmdset_0002()
687 && extp->BlkProtUnprot == 8) { in cfi_cmdset_0002()
689 mtd->_lock = cfi_ppb_lock; in cfi_cmdset_0002()
690 mtd->_unlock = cfi_ppb_unlock; in cfi_cmdset_0002()
691 mtd->_is_locked = cfi_ppb_is_locked; in cfi_cmdset_0002()
695 bootloc = extp->TopBottom; in cfi_cmdset_0002()
697 printk(KERN_WARNING "%s: CFI contains unrecognised boot " in cfi_cmdset_0002()
699 map->name, bootloc); in cfi_cmdset_0002()
703 if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) { in cfi_cmdset_0002()
704 printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); in cfi_cmdset_0002()
706 for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) { in cfi_cmdset_0002()
707 int j = (cfi->cfiq->NumEraseRegions-1)-i; in cfi_cmdset_0002()
709 swap(cfi->cfiq->EraseRegionInfo[i], in cfi_cmdset_0002()
710 cfi->cfiq->EraseRegionInfo[j]); in cfi_cmdset_0002()
714 cfi->addr_unlock1 = 0x555; in cfi_cmdset_0002()
715 cfi->addr_unlock2 = 0x2aa; in cfi_cmdset_0002()
719 if (!cfi->addr_unlock1 || !cfi->addr_unlock2) { in cfi_cmdset_0002()
725 else if (cfi->cfi_mode == CFI_MODE_JEDEC) { in cfi_cmdset_0002()
732 for (i=0; i< cfi->numchips; i++) { in cfi_cmdset_0002()
733 cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; in cfi_cmdset_0002()
734 cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; in cfi_cmdset_0002()
735 cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; in cfi_cmdset_0002()
738 * of struct cfi_ident that was probed from the chip's CFI area, if in cfi_cmdset_0002()
742 if (cfi->cfiq->BufWriteTimeoutTyp && in cfi_cmdset_0002()
743 cfi->cfiq->BufWriteTimeoutMax) in cfi_cmdset_0002()
744 cfi->chips[i].buffer_write_time_max = in cfi_cmdset_0002()
745 1 << (cfi->cfiq->BufWriteTimeoutTyp + in cfi_cmdset_0002()
746 cfi->cfiq->BufWriteTimeoutMax); in cfi_cmdset_0002()
748 cfi->chips[i].buffer_write_time_max = 0; in cfi_cmdset_0002()
750 cfi->chips[i].buffer_write_time_max = in cfi_cmdset_0002()
751 max(cfi->chips[i].buffer_write_time_max, 2000); in cfi_cmdset_0002()
753 cfi->chips[i].ref_point_counter = 0; in cfi_cmdset_0002()
754 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0002()
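
The buffer-write timeout fields are log2 encodings: the typical timeout is 1 << BufWriteTimeoutTyp microseconds and the worst case is that value shifted up by BufWriteTimeoutMax, hence the 1 << (Typ + Max) above; the result is later handed to usecs_to_jiffies() in do_write_buffer_wait(). With illustrative values Typ = 8 and Max = 4 this works out to 1 << 12 = 4096 µs, and the max(..., 2000) clamp keeps the limit at no less than 2 ms when a chip reports implausibly small numbers.
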
757 map->fldrv = &cfi_amdstd_chipdrv; in cfi_cmdset_0002()
769 struct map_info *map = mtd->priv; in cfi_amdstd_setup()
770 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_setup()
771 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave; in cfi_amdstd_setup()
775 printk(KERN_NOTICE "number of %s chips: %d\n", in cfi_amdstd_setup()
776 (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips); in cfi_amdstd_setup()
778 mtd->size = devsize * cfi->numchips; in cfi_amdstd_setup()
780 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips; in cfi_amdstd_setup()
781 mtd->eraseregions = kmalloc_array(mtd->numeraseregions, in cfi_amdstd_setup()
784 if (!mtd->eraseregions) in cfi_amdstd_setup()
787 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) { in cfi_amdstd_setup()
789 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave; in cfi_amdstd_setup()
790 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1; in cfi_amdstd_setup()
792 if (mtd->erasesize < ersize) { in cfi_amdstd_setup()
793 mtd->erasesize = ersize; in cfi_amdstd_setup()
795 for (j=0; j<cfi->numchips; j++) { in cfi_amdstd_setup()
796 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset; in cfi_amdstd_setup()
797 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize; in cfi_amdstd_setup()
798 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum; in cfi_amdstd_setup()
809 register_reboot_notifier(&mtd->reboot_notifier); in cfi_amdstd_setup()
813 kfree(mtd->eraseregions); in cfi_amdstd_setup()
815 kfree(cfi->cmdset_priv); in cfi_amdstd_setup()
822 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
823 * non-suspended sector) and is indicated by no toggle bits toggling.
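
A minimal sketch of the toggle-bit test that "no toggle bits toggling" refers to; chip_ready() below additionally handles the status-register variant and an expected-data check, which this sketch leaves out:

/* DQ6 toggles on consecutive reads while an embedded erase/program
 * algorithm is running, so two identical reads mean the chip is ready. */
static int toggle_bits_quiet(struct map_info *map, unsigned long addr)
{
	map_word a = map_read(map, addr);
	map_word b = map_read(map, addr);

	return map_word_equal(map, a, b);
}
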
836 struct cfi_private *cfi = map->fldrv_priv; in chip_ready()
846 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, in chip_ready()
847 cfi->device_type, NULL); in chip_ready()
867 struct cfi_private *cfi = map->fldrv_priv; in chip_good()
870 if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA) in chip_good()
879 struct cfi_private *cfi = map->fldrv_priv; in get_chip()
881 struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv; in get_chip()
886 switch (chip->state) { in get_chip()
895 return -EIO; in get_chip()
897 mutex_unlock(&chip->mutex); in get_chip()
899 mutex_lock(&chip->mutex); in get_chip()
911 if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) || in get_chip()
913 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2)))) in get_chip()
917 if ((adr & chip->in_progress_block_mask) == in get_chip()
918 chip->in_progress_block_addr) in get_chip()
922 /* It's harmless to issue the Erase-Suspend and Erase-Resume in get_chip()
924 map_write(map, CMD(0xB0), chip->in_progress_block_addr); in get_chip()
925 chip->oldstate = FL_ERASING; in get_chip()
926 chip->state = FL_ERASE_SUSPENDING; in get_chip()
927 chip->erase_suspended = 1; in get_chip()
934 * Send an Erase-Resume command as either in get_chip()
937 * use the erase-in-progress sector. */ in get_chip()
939 printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); in get_chip()
940 return -EIO; in get_chip()
943 mutex_unlock(&chip->mutex); in get_chip()
945 mutex_lock(&chip->mutex); in get_chip()
946 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING. in get_chip()
949 chip->state = FL_READY; in get_chip()
954 (!cfip || !(cfip->EraseSuspend&2))) in get_chip()
956 chip->oldstate = chip->state; in get_chip()
957 chip->state = FL_READY; in get_chip()
962 return -EIO; in get_chip()
965 /* Only if there's no operation suspended... */ in get_chip()
966 if (mode == FL_READY && chip->oldstate == FL_READY) in get_chip()
972 add_wait_queue(&chip->wq, &wait); in get_chip()
973 mutex_unlock(&chip->mutex); in get_chip()
975 remove_wait_queue(&chip->wq, &wait); in get_chip()
976 mutex_lock(&chip->mutex); in get_chip()
984 struct cfi_private *cfi = map->fldrv_priv; in put_chip()
986 switch(chip->oldstate) { in put_chip()
989 chip->in_progress_block_addr); in put_chip()
990 map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); in put_chip()
992 chip->oldstate = FL_READY; in put_chip()
993 chip->state = FL_ERASING; in put_chip()
997 chip->state = chip->oldstate; in put_chip()
998 chip->oldstate = FL_READY; in put_chip()
1005 printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate); in put_chip()
1007 wake_up(&chip->wq); in put_chip()
1017 * may be called and nothing else (it's a good thing to inspect generated
1027 (void) map_read(map, adr); /* ensure mmu mapping is up to date */ in xip_disable()
1034 struct cfi_private *cfi = map->fldrv_priv; in xip_enable()
1036 if (chip->state != FL_POINT && chip->state != FL_READY) { in xip_enable()
1038 chip->state = FL_READY; in xip_enable()
1061 struct cfi_private *cfi = map->fldrv_priv; in xip_udelay()
1062 struct cfi_pri_amdstd *extp = cfi->cmdset_priv; in xip_udelay()
1070 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) && in xip_udelay()
1071 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { in xip_udelay()
1073 * Let's suspend the erase operation when supported. in xip_udelay()
1083 usec -= xip_elapsed_since(start); in xip_udelay()
1099 oldstate = chip->state; in xip_udelay()
1102 chip->state = FL_XIP_WHILE_ERASING; in xip_udelay()
1103 chip->erase_suspended = 1; in xip_udelay()
1108 mutex_unlock(&chip->mutex); in xip_udelay()
1115 * a suspended erase state. If so let's wait in xip_udelay()
1116 * until it's done. in xip_udelay()
1118 mutex_lock(&chip->mutex); in xip_udelay()
1119 while (chip->state != FL_XIP_WHILE_ERASING) { in xip_udelay()
1122 add_wait_queue(&chip->wq, &wait); in xip_udelay()
1123 mutex_unlock(&chip->mutex); in xip_udelay()
1125 remove_wait_queue(&chip->wq, &wait); in xip_udelay()
1126 mutex_lock(&chip->mutex); in xip_udelay()
1134 map_write(map, cfi->sector_erase_cmd, adr); in xip_udelay()
1135 chip->state = oldstate; in xip_udelay()
1177 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1190 mutex_unlock(&chip->mutex); \
1192 mutex_lock(&chip->mutex); \
1197 mutex_unlock(&chip->mutex); \
1200 mutex_lock(&chip->mutex); \
1208 struct cfi_private *cfi = map->fldrv_priv; in do_read_onechip()
1211 adr += chip->start; in do_read_onechip()
1214 cmd_addr = adr & ~(map_bankwidth(map)-1); in do_read_onechip()
1216 mutex_lock(&chip->mutex); in do_read_onechip()
1219 mutex_unlock(&chip->mutex); in do_read_onechip()
1223 if (chip->state != FL_POINT && chip->state != FL_READY) { in do_read_onechip()
1225 chip->state = FL_READY; in do_read_onechip()
1232 mutex_unlock(&chip->mutex); in do_read_onechip()
1239 struct map_info *map = mtd->priv; in cfi_amdstd_read()
1240 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_read()
1246 chipnum = (from >> cfi->chipshift); in cfi_amdstd_read()
1247 ofs = from - (chipnum << cfi->chipshift); in cfi_amdstd_read()
1252 if (chipnum >= cfi->numchips) in cfi_amdstd_read()
1255 if ((len + ofs -1) >> cfi->chipshift) in cfi_amdstd_read()
1256 thislen = (1<<cfi->chipshift) - ofs; in cfi_amdstd_read()
1260 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); in cfi_amdstd_read()
1265 len -= thislen; in cfi_amdstd_read()
1280 struct cfi_private *cfi = map->fldrv_priv; in otp_enter()
1282 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in otp_enter()
1283 cfi->device_type, NULL); in otp_enter()
1284 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in otp_enter()
1285 cfi->device_type, NULL); in otp_enter()
1286 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, in otp_enter()
1287 cfi->device_type, NULL); in otp_enter()
1289 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); in otp_enter()
1295 struct cfi_private *cfi = map->fldrv_priv; in otp_exit()
1297 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in otp_exit()
1298 cfi->device_type, NULL); in otp_exit()
1299 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in otp_exit()
1300 cfi->device_type, NULL); in otp_exit()
1301 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, in otp_exit()
1302 cfi->device_type, NULL); in otp_exit()
1303 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, in otp_exit()
1304 cfi->device_type, NULL); in otp_exit()
1306 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); in otp_exit()
1317 mutex_lock(&chip->mutex); in do_read_secsi_onechip()
1319 if (chip->state != FL_READY){ in do_read_secsi_onechip()
1321 add_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip()
1323 mutex_unlock(&chip->mutex); in do_read_secsi_onechip()
1326 remove_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip()
1331 adr += chip->start; in do_read_secsi_onechip()
1333 chip->state = FL_READY; in do_read_secsi_onechip()
1339 wake_up(&chip->wq); in do_read_secsi_onechip()
1340 mutex_unlock(&chip->mutex); in do_read_secsi_onechip()
1347 struct map_info *map = mtd->priv; in cfi_amdstd_secsi_read()
1348 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_secsi_read()
1361 if (chipnum >= cfi->numchips) in cfi_amdstd_secsi_read()
1364 if ((len + ofs -1) >> 3) in cfi_amdstd_secsi_read()
1365 thislen = (1<<3) - ofs; in cfi_amdstd_secsi_read()
1369 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, in cfi_amdstd_secsi_read()
1375 len -= thislen; in cfi_amdstd_secsi_read()
1393 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); in do_otp_write()
1394 int gap = adr - bus_ofs; in do_otp_write()
1395 int n = min_t(int, len, map_bankwidth(map) - gap); in do_otp_write()
1412 len -= n; in do_otp_write()
1421 struct cfi_private *cfi = map->fldrv_priv; in do_otp_lock()
1428 return -EINVAL; in do_otp_lock()
1430 mutex_lock(&chip->mutex); in do_otp_lock()
1431 ret = get_chip(map, chip, chip->start, FL_LOCKING); in do_otp_lock()
1433 mutex_unlock(&chip->mutex); in do_otp_lock()
1436 chip->state = FL_LOCKING; in do_otp_lock()
1439 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_otp_lock()
1440 cfi->device_type, NULL); in do_otp_lock()
1441 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in do_otp_lock()
1442 cfi->device_type, NULL); in do_otp_lock()
1443 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, in do_otp_lock()
1444 cfi->device_type, NULL); in do_otp_lock()
1454 map_write(map, CMD(0xA0), chip->start); in do_otp_lock()
1455 map_write(map, CMD(lockreg), chip->start); in do_otp_lock()
1465 ret = -EIO; in do_otp_lock()
1472 map_write(map, CMD(0x90), chip->start); in do_otp_lock()
1473 map_write(map, CMD(0x00), chip->start); in do_otp_lock()
1475 chip->state = FL_READY; in do_otp_lock()
1476 put_chip(map, chip, chip->start); in do_otp_lock()
1477 mutex_unlock(&chip->mutex); in do_otp_lock()
1486 struct map_info *map = mtd->priv; in cfi_amdstd_otp_walk()
1487 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_otp_walk()
1488 int ofs_factor = cfi->interleave * cfi->device_type; in cfi_amdstd_otp_walk()
1501 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) { in cfi_amdstd_otp_walk()
1502 chip = &cfi->chips[chipnum]; in cfi_amdstd_otp_walk()
1508 base = chip->start; in cfi_amdstd_otp_walk()
1512 mutex_lock(&chip->mutex); in cfi_amdstd_otp_walk()
1515 mutex_unlock(&chip->mutex); in cfi_amdstd_otp_walk()
1522 mutex_unlock(&chip->mutex); in cfi_amdstd_otp_walk()
1533 mutex_lock(&chip->mutex); in cfi_amdstd_otp_walk()
1536 mutex_unlock(&chip->mutex); in cfi_amdstd_otp_walk()
1541 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, in cfi_amdstd_otp_walk()
1542 chip->start, map, cfi, in cfi_amdstd_otp_walk()
1543 cfi->device_type, NULL); in cfi_amdstd_otp_walk()
1544 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, in cfi_amdstd_otp_walk()
1545 chip->start, map, cfi, in cfi_amdstd_otp_walk()
1546 cfi->device_type, NULL); in cfi_amdstd_otp_walk()
1547 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, in cfi_amdstd_otp_walk()
1548 chip->start, map, cfi, in cfi_amdstd_otp_walk()
1549 cfi->device_type, NULL); in cfi_amdstd_otp_walk()
1553 map_write(map, CMD(0x90), chip->start); in cfi_amdstd_otp_walk()
1554 map_write(map, CMD(0x00), chip->start); in cfi_amdstd_otp_walk()
1555 put_chip(map, chip, chip->start); in cfi_amdstd_otp_walk()
1556 mutex_unlock(&chip->mutex); in cfi_amdstd_otp_walk()
1571 len -= sizeof(*otpinfo); in cfi_amdstd_otp_walk()
1573 return -ENOSPC; in cfi_amdstd_otp_walk()
1575 otpinfo->start = from; in cfi_amdstd_otp_walk()
1576 otpinfo->length = otpsize; in cfi_amdstd_otp_walk()
1577 otpinfo->locked = otplocked; in cfi_amdstd_otp_walk()
1583 size = (len < otpsize - from) ? len : otpsize - from; in cfi_amdstd_otp_walk()
1590 len -= size; in cfi_amdstd_otp_walk()
1594 from -= otpsize; in cfi_amdstd_otp_walk()
1664 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_write_oneword_once()
1665 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_write_oneword_once()
1666 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_write_oneword_once()
1668 chip->state = mode; in do_write_oneword_once()
1672 chip->word_write_time); in do_write_oneword_once()
1677 if (chip->state != mode) { in do_write_oneword_once()
1678 /* Someone's suspended the write. Sleep */ in do_write_oneword_once()
1682 add_wait_queue(&chip->wq, &wait); in do_write_oneword_once()
1683 mutex_unlock(&chip->mutex); in do_write_oneword_once()
1685 remove_wait_queue(&chip->wq, &wait); in do_write_oneword_once()
1687 mutex_lock(&chip->mutex); in do_write_oneword_once()
1698 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__); in do_write_oneword_once()
1700 ret = -EIO; in do_write_oneword_once()
1706 ret = -EIO; in do_write_oneword_once()
1723 mutex_lock(&chip->mutex); in do_write_oneword_start()
1727 mutex_unlock(&chip->mutex); in do_write_oneword_start()
1744 chip->state = FL_READY; in do_write_oneword_done()
1748 mutex_unlock(&chip->mutex); in do_write_oneword_done()
1756 struct cfi_private *cfi = map->fldrv_priv; in do_write_oneword_retry()
1763 * present - it saves time and works around buggy chips that corrupt in do_write_oneword_retry()
1769 pr_debug("MTD %s(): NOP\n", __func__); in do_write_oneword_retry()
1781 map_write(map, CMD(0xF0), chip->start); in do_write_oneword_retry()
1782 /* FIXME - should have reset delay before continuing */ in do_write_oneword_retry()
1800 adr += chip->start; in do_write_oneword()
1802 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr, in do_write_oneword()
1820 struct map_info *map = mtd->priv; in cfi_amdstd_write_words()
1821 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_write_words()
1827 chipnum = to >> cfi->chipshift; in cfi_amdstd_write_words()
1828 ofs = to - (chipnum << cfi->chipshift); in cfi_amdstd_write_words()
1829 chipstart = cfi->chips[chipnum].start; in cfi_amdstd_write_words()
1831 /* If it's not bus-aligned, do the first byte write */ in cfi_amdstd_write_words()
1832 if (ofs & (map_bankwidth(map)-1)) { in cfi_amdstd_write_words()
1833 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); in cfi_amdstd_write_words()
1834 int i = ofs - bus_ofs; in cfi_amdstd_write_words()
1839 mutex_lock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1841 if (cfi->chips[chipnum].state != FL_READY) { in cfi_amdstd_write_words()
1843 add_wait_queue(&cfi->chips[chipnum].wq, &wait); in cfi_amdstd_write_words()
1845 mutex_unlock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1848 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); in cfi_amdstd_write_words()
1855 mutex_unlock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1858 n = min_t(int, len, map_bankwidth(map)-i); in cfi_amdstd_write_words()
1862 ret = do_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_write_words()
1870 len -= n; in cfi_amdstd_write_words()
1872 if (ofs >> cfi->chipshift) { in cfi_amdstd_write_words()
1875 if (chipnum == cfi->numchips) in cfi_amdstd_write_words()
1886 ret = do_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_write_words()
1894 len -= map_bankwidth(map); in cfi_amdstd_write_words()
1896 if (ofs >> cfi->chipshift) { in cfi_amdstd_write_words()
1899 if (chipnum == cfi->numchips) in cfi_amdstd_write_words()
1901 chipstart = cfi->chips[chipnum].start; in cfi_amdstd_write_words()
1906 if (len & (map_bankwidth(map)-1)) { in cfi_amdstd_write_words()
1910 mutex_lock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1912 if (cfi->chips[chipnum].state != FL_READY) { in cfi_amdstd_write_words()
1914 add_wait_queue(&cfi->chips[chipnum].wq, &wait); in cfi_amdstd_write_words()
1916 mutex_unlock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1919 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); in cfi_amdstd_write_words()
1925 mutex_unlock(&cfi->chips[chipnum].mutex); in cfi_amdstd_write_words()
1929 ret = do_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_write_words()
1953 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max); in do_write_buffer_wait()
1957 if (chip->state != FL_WRITING) { in do_write_buffer_wait()
1958 /* Someone's suspended the write. Sleep */ in do_write_buffer_wait()
1962 add_wait_queue(&chip->wq, &wait); in do_write_buffer_wait()
1963 mutex_unlock(&chip->mutex); in do_write_buffer_wait()
1965 remove_wait_queue(&chip->wq, &wait); in do_write_buffer_wait()
1967 mutex_lock(&chip->mutex); in do_write_buffer_wait()
1977 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n", in do_write_buffer_wait()
1979 ret = -EIO; in do_write_buffer_wait()
1985 ret = -EIO; in do_write_buffer_wait()
2001 * Recovery from write-buffer programming failures requires in do_write_buffer_reset()
2002 * the write-to-buffer-reset sequence. Since the last part in do_write_buffer_reset()
2008 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_write_buffer_reset()
2009 cfi->device_type, NULL); in do_write_buffer_reset()
2010 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in do_write_buffer_reset()
2011 cfi->device_type, NULL); in do_write_buffer_reset()
2012 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, in do_write_buffer_reset()
2013 cfi->device_type, NULL); in do_write_buffer_reset()
2015 /* FIXME - should have reset delay before continuing */ in do_write_buffer_reset()
2025 struct cfi_private *cfi = map->fldrv_priv; in do_write_buffer()
2031 adr += chip->start; in do_write_buffer()
2034 mutex_lock(&chip->mutex); in do_write_buffer()
2037 mutex_unlock(&chip->mutex); in do_write_buffer()
2043 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", in do_write_buffer()
2050 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_write_buffer()
2051 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_write_buffer()
2056 chip->state = FL_WRITING_TO_BUFFER; in do_write_buffer()
2060 map_write(map, CMD(words - 1), cmd_adr); in do_write_buffer()
2070 z -= map_bankwidth(map); in do_write_buffer()
2076 chip->state = FL_WRITING; in do_write_buffer()
2080 chip->word_write_time); in do_write_buffer()
2088 chip->state = FL_READY; in do_write_buffer()
2091 mutex_unlock(&chip->mutex); in do_write_buffer()
2100 struct map_info *map = mtd->priv; in cfi_amdstd_write_buffers()
2101 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_write_buffers()
2102 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; in cfi_amdstd_write_buffers()
2107 chipnum = to >> cfi->chipshift; in cfi_amdstd_write_buffers()
2108 ofs = to - (chipnum << cfi->chipshift); in cfi_amdstd_write_buffers()
2110 /* If it's not bus-aligned, do the first word write */ in cfi_amdstd_write_buffers()
2111 if (ofs & (map_bankwidth(map)-1)) { in cfi_amdstd_write_buffers()
2112 size_t local_len = (-ofs)&(map_bankwidth(map)-1); in cfi_amdstd_write_buffers()
2115 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), in cfi_amdstd_write_buffers()
2121 len -= local_len; in cfi_amdstd_write_buffers()
2123 if (ofs >> cfi->chipshift) { in cfi_amdstd_write_buffers()
2126 if (chipnum == cfi->numchips) in cfi_amdstd_write_buffers()
2134 int size = wbufsize - (ofs & (wbufsize-1)); in cfi_amdstd_write_buffers()
2139 size -= size % map_bankwidth(map); in cfi_amdstd_write_buffers()
2141 ret = do_write_buffer(map, &cfi->chips[chipnum], in cfi_amdstd_write_buffers()
2149 len -= size; in cfi_amdstd_write_buffers()
2151 if (ofs >> cfi->chipshift) { in cfi_amdstd_write_buffers()
2154 if (chipnum == cfi->numchips) in cfi_amdstd_write_buffers()
2162 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift), in cfi_amdstd_write_buffers()
2184 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_panic_wait()
2192 if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL)) in cfi_amdstd_panic_wait()
2205 map_write(map, CMD(0xF0), chip->start); in cfi_amdstd_panic_wait()
2215 retries--; in cfi_amdstd_panic_wait()
2219 return -EBUSY; in cfi_amdstd_panic_wait()
2237 struct cfi_private *cfi = map->fldrv_priv; in do_panic_write_oneword()
2243 adr += chip->start; in do_panic_write_oneword()
2249 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n", in do_panic_write_oneword()
2254 * present - it saves time and works around buggy chips that corrupt in do_panic_write_oneword()
2260 pr_debug("MTD %s(): NOP\n", __func__); in do_panic_write_oneword()
2267 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_panic_write_oneword()
2268 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_panic_write_oneword()
2269 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_panic_write_oneword()
2282 map_write(map, CMD(0xF0), chip->start); in do_panic_write_oneword()
2283 /* FIXME - should have reset delay before continuing */ in do_panic_write_oneword()
2288 ret = -EIO; in do_panic_write_oneword()
2312 struct map_info *map = mtd->priv; in cfi_amdstd_panic_write()
2313 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_panic_write()
2318 chipnum = to >> cfi->chipshift; in cfi_amdstd_panic_write()
2319 ofs = to - (chipnum << cfi->chipshift); in cfi_amdstd_panic_write()
2320 chipstart = cfi->chips[chipnum].start; in cfi_amdstd_panic_write()
2322 /* If it's not bus aligned, do the first byte write */ in cfi_amdstd_panic_write()
2323 if (ofs & (map_bankwidth(map) - 1)) { in cfi_amdstd_panic_write()
2324 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); in cfi_amdstd_panic_write()
2325 int i = ofs - bus_ofs; in cfi_amdstd_panic_write()
2329 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); in cfi_amdstd_panic_write()
2337 n = min_t(int, len, map_bankwidth(map) - i); in cfi_amdstd_panic_write()
2341 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_panic_write()
2349 len -= n; in cfi_amdstd_panic_write()
2351 if (ofs >> cfi->chipshift) { in cfi_amdstd_panic_write()
2354 if (chipnum == cfi->numchips) in cfi_amdstd_panic_write()
2365 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_panic_write()
2373 len -= map_bankwidth(map); in cfi_amdstd_panic_write()
2375 if (ofs >> cfi->chipshift) { in cfi_amdstd_panic_write()
2378 if (chipnum == cfi->numchips) in cfi_amdstd_panic_write()
2381 chipstart = cfi->chips[chipnum].start; in cfi_amdstd_panic_write()
2386 if (len & (map_bankwidth(map) - 1)) { in cfi_amdstd_panic_write()
2389 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); in cfi_amdstd_panic_write()
2397 ret = do_panic_write_oneword(map, &cfi->chips[chipnum], in cfi_amdstd_panic_write()
2415 struct cfi_private *cfi = map->fldrv_priv; in do_erase_chip()
2423 adr = cfi->addr_unlock1; in do_erase_chip()
2425 mutex_lock(&chip->mutex); in do_erase_chip()
2428 mutex_unlock(&chip->mutex); in do_erase_chip()
2432 pr_debug("MTD %s(): ERASE 0x%.8lx\n", in do_erase_chip()
2433 __func__, chip->start); in do_erase_chip()
2435 XIP_INVAL_CACHED_RANGE(map, adr, map->size); in do_erase_chip()
2440 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2441 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2442 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2443 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2444 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2445 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_chip()
2447 chip->state = FL_ERASING; in do_erase_chip()
2448 chip->erase_suspended = 0; in do_erase_chip()
2449 chip->in_progress_block_addr = adr; in do_erase_chip()
2450 chip->in_progress_block_mask = ~(map->size - 1); in do_erase_chip()
2453 adr, map->size, in do_erase_chip()
2454 chip->erase_time*500); in do_erase_chip()
2459 if (chip->state != FL_ERASING) { in do_erase_chip()
2460 /* Someone's suspended the erase. Sleep */ in do_erase_chip()
2462 add_wait_queue(&chip->wq, &wait); in do_erase_chip()
2463 mutex_unlock(&chip->mutex); in do_erase_chip()
2465 remove_wait_queue(&chip->wq, &wait); in do_erase_chip()
2466 mutex_lock(&chip->mutex); in do_erase_chip()
2469 if (chip->erase_suspended) { in do_erase_chip()
2473 chip->erase_suspended = 0; in do_erase_chip()
2478 ret = -EIO; in do_erase_chip()
2483 printk(KERN_WARNING "MTD %s(): software timeout\n", in do_erase_chip()
2485 ret = -EIO; in do_erase_chip()
2495 map_write(map, CMD(0xF0), chip->start); in do_erase_chip()
2496 /* FIXME - should have reset delay before continuing */ in do_erase_chip()
2504 chip->state = FL_READY; in do_erase_chip()
2508 mutex_unlock(&chip->mutex); in do_erase_chip()
2516 struct cfi_private *cfi = map->fldrv_priv; in do_erase_oneblock()
2523 adr += chip->start; in do_erase_oneblock()
2525 mutex_lock(&chip->mutex); in do_erase_oneblock()
2528 mutex_unlock(&chip->mutex); in do_erase_oneblock()
2532 pr_debug("MTD %s(): ERASE 0x%.8lx\n", in do_erase_oneblock()
2540 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_oneblock()
2541 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_oneblock()
2542 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_oneblock()
2543 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_oneblock()
2544 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); in do_erase_oneblock()
2545 map_write(map, cfi->sector_erase_cmd, adr); in do_erase_oneblock()
2547 chip->state = FL_ERASING; in do_erase_oneblock()
2548 chip->erase_suspended = 0; in do_erase_oneblock()
2549 chip->in_progress_block_addr = adr; in do_erase_oneblock()
2550 chip->in_progress_block_mask = ~(len - 1); in do_erase_oneblock()
2554 chip->erase_time*500); in do_erase_oneblock()
2559 if (chip->state != FL_ERASING) { in do_erase_oneblock()
2560 /* Someone's suspended the erase. Sleep */ in do_erase_oneblock()
2562 add_wait_queue(&chip->wq, &wait); in do_erase_oneblock()
2563 mutex_unlock(&chip->mutex); in do_erase_oneblock()
2565 remove_wait_queue(&chip->wq, &wait); in do_erase_oneblock()
2566 mutex_lock(&chip->mutex); in do_erase_oneblock()
2569 if (chip->erase_suspended) { in do_erase_oneblock()
2573 chip->erase_suspended = 0; in do_erase_oneblock()
2578 ret = -EIO; in do_erase_oneblock()
2583 printk(KERN_WARNING "MTD %s(): software timeout\n", in do_erase_oneblock()
2585 ret = -EIO; in do_erase_oneblock()
2595 map_write(map, CMD(0xF0), chip->start); in do_erase_oneblock()
2596 /* FIXME - should have reset delay before continuing */ in do_erase_oneblock()
2604 chip->state = FL_READY; in do_erase_oneblock()
2608 mutex_unlock(&chip->mutex); in do_erase_oneblock()
2615 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr, in cfi_amdstd_erase_varsize()
2616 instr->len, NULL); in cfi_amdstd_erase_varsize()
2622 struct map_info *map = mtd->priv; in cfi_amdstd_erase_chip()
2623 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_erase_chip()
2625 if (instr->addr != 0) in cfi_amdstd_erase_chip()
2626 return -EINVAL; in cfi_amdstd_erase_chip()
2628 if (instr->len != mtd->size) in cfi_amdstd_erase_chip()
2629 return -EINVAL; in cfi_amdstd_erase_chip()
2631 return do_erase_chip(map, &cfi->chips[0]); in cfi_amdstd_erase_chip()
2637 struct cfi_private *cfi = map->fldrv_priv; in do_atmel_lock()
2640 mutex_lock(&chip->mutex); in do_atmel_lock()
2641 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING); in do_atmel_lock()
2644 chip->state = FL_LOCKING; in do_atmel_lock()
2646 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); in do_atmel_lock()
2648 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_atmel_lock()
2649 cfi->device_type, NULL); in do_atmel_lock()
2650 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in do_atmel_lock()
2651 cfi->device_type, NULL); in do_atmel_lock()
2652 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, in do_atmel_lock()
2653 cfi->device_type, NULL); in do_atmel_lock()
2654 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_atmel_lock()
2655 cfi->device_type, NULL); in do_atmel_lock()
2656 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in do_atmel_lock()
2657 cfi->device_type, NULL); in do_atmel_lock()
2658 map_write(map, CMD(0x40), chip->start + adr); in do_atmel_lock()
2660 chip->state = FL_READY; in do_atmel_lock()
2661 put_chip(map, chip, adr + chip->start); in do_atmel_lock()
2665 mutex_unlock(&chip->mutex); in do_atmel_lock()
2672 struct cfi_private *cfi = map->fldrv_priv; in do_atmel_unlock()
2675 mutex_lock(&chip->mutex); in do_atmel_unlock()
2676 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); in do_atmel_unlock()
2679 chip->state = FL_UNLOCKING; in do_atmel_unlock()
2681 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len); in do_atmel_unlock()
2683 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_atmel_unlock()
2684 cfi->device_type, NULL); in do_atmel_unlock()
2687 chip->state = FL_READY; in do_atmel_unlock()
2688 put_chip(map, chip, adr + chip->start); in do_atmel_unlock()
2692 mutex_unlock(&chip->mutex); in do_atmel_unlock()
2707 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
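
From an MTD user's perspective the PPB scheme is exercised through the generic locking API that cfi_ppb_lock/cfi_ppb_unlock/cfi_ppb_is_locked are wired to below; a minimal usage sketch, with an illustrative sector offset:

static int ppb_unlock_one_sector(struct mtd_info *mtd, loff_t ofs)
{
	/* mtd_is_locked() returns > 0 when the range is protected */
	int ret = mtd_is_locked(mtd, ofs, mtd->erasesize);

	if (ret <= 0)		/* already unlocked, or an error code */
		return ret;
	return mtd_unlock(mtd, ofs, mtd->erasesize);
}

Note that, as the comments in cfi_ppb_unlock() further down explain, clearing one PPB bit erases all of them, so the driver has to record which sectors were locked beforehand and re-lock them afterwards.
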
2724 struct cfi_private *cfi = map->fldrv_priv; in do_ppb_xxlock()
2728 adr += chip->start; in do_ppb_xxlock()
2729 mutex_lock(&chip->mutex); in do_ppb_xxlock()
2732 mutex_unlock(&chip->mutex); in do_ppb_xxlock()
2736 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len); in do_ppb_xxlock()
2738 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, in do_ppb_xxlock()
2739 cfi->device_type, NULL); in do_ppb_xxlock()
2740 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, in do_ppb_xxlock()
2741 cfi->device_type, NULL); in do_ppb_xxlock()
2743 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, in do_ppb_xxlock()
2744 cfi->device_type, NULL); in do_ppb_xxlock()
2747 chip->state = FL_LOCKING; in do_ppb_xxlock()
2755 chip->state = FL_UNLOCKING; in do_ppb_xxlock()
2756 map_write(map, CMD(0x80), chip->start); in do_ppb_xxlock()
2757 map_write(map, CMD(0x30), chip->start); in do_ppb_xxlock()
2759 chip->state = FL_JEDEC_QUERY; in do_ppb_xxlock()
2760 /* Return locked status: 0->locked, 1->unlocked */ in do_ppb_xxlock()
2768 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */ in do_ppb_xxlock()
2775 ret = -EIO; in do_ppb_xxlock()
2783 map_write(map, CMD(0x90), chip->start); in do_ppb_xxlock()
2784 map_write(map, CMD(0x00), chip->start); in do_ppb_xxlock()
2786 chip->state = FL_READY; in do_ppb_xxlock()
2788 mutex_unlock(&chip->mutex); in do_ppb_xxlock()
2803 struct mtd_erase_region_info *regions = mtd->eraseregions; in cfi_ppb_unlock()
2804 struct map_info *map = mtd->priv; in cfi_ppb_unlock()
2805 struct cfi_private *cfi = map->fldrv_priv; in cfi_ppb_unlock()
2818 * We need to re-lock all previously locked sectors. So lets in cfi_ppb_unlock()
2823 for (i = 0; i < mtd->numeraseregions; i++) in cfi_ppb_unlock()
2828 return -ENOMEM; in cfi_ppb_unlock()
2839 length = mtd->size; in cfi_ppb_unlock()
2847 * status at "unlocked" (locked=0) for the final re-locking. in cfi_ppb_unlock()
2850 sect[sectors].chip = &cfi->chips[chipnum]; in cfi_ppb_unlock()
2853 map, &cfi->chips[chipnum], adr, 0, in cfi_ppb_unlock()
2859 length -= size; in cfi_ppb_unlock()
2864 if (adr >> cfi->chipshift) { in cfi_ppb_unlock()
2870 if (chipnum >= cfi->numchips) in cfi_ppb_unlock()
2879 return -EINVAL; in cfi_ppb_unlock()
2893 * We need to re-lock all previously locked sectors. in cfi_ppb_unlock()
2914 struct map_info *map = mtd->priv; in cfi_amdstd_sync()
2915 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_sync()
2921 for (i=0; !ret && i<cfi->numchips; i++) { in cfi_amdstd_sync()
2922 chip = &cfi->chips[i]; in cfi_amdstd_sync()
2925 mutex_lock(&chip->mutex); in cfi_amdstd_sync()
2927 switch(chip->state) { in cfi_amdstd_sync()
2932 chip->oldstate = chip->state; in cfi_amdstd_sync()
2933 chip->state = FL_SYNCING; in cfi_amdstd_sync()
2934 /* No need to wake_up() on this state change - in cfi_amdstd_sync()
2940 mutex_unlock(&chip->mutex); in cfi_amdstd_sync()
2946 add_wait_queue(&chip->wq, &wait); in cfi_amdstd_sync()
2948 mutex_unlock(&chip->mutex); in cfi_amdstd_sync()
2952 remove_wait_queue(&chip->wq, &wait); in cfi_amdstd_sync()
2960 for (i--; i >=0; i--) { in cfi_amdstd_sync()
2961 chip = &cfi->chips[i]; in cfi_amdstd_sync()
2963 mutex_lock(&chip->mutex); in cfi_amdstd_sync()
2965 if (chip->state == FL_SYNCING) { in cfi_amdstd_sync()
2966 chip->state = chip->oldstate; in cfi_amdstd_sync()
2967 wake_up(&chip->wq); in cfi_amdstd_sync()
2969 mutex_unlock(&chip->mutex); in cfi_amdstd_sync()
2976 struct map_info *map = mtd->priv; in cfi_amdstd_suspend()
2977 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_suspend()
2982 for (i=0; !ret && i<cfi->numchips; i++) { in cfi_amdstd_suspend()
2983 chip = &cfi->chips[i]; in cfi_amdstd_suspend()
2985 mutex_lock(&chip->mutex); in cfi_amdstd_suspend()
2987 switch(chip->state) { in cfi_amdstd_suspend()
2992 chip->oldstate = chip->state; in cfi_amdstd_suspend()
2993 chip->state = FL_PM_SUSPENDED; in cfi_amdstd_suspend()
2994 /* No need to wake_up() on this state change - in cfi_amdstd_suspend()
3003 ret = -EAGAIN; in cfi_amdstd_suspend()
3006 mutex_unlock(&chip->mutex); in cfi_amdstd_suspend()
3012 for (i--; i >=0; i--) { in cfi_amdstd_suspend()
3013 chip = &cfi->chips[i]; in cfi_amdstd_suspend()
3015 mutex_lock(&chip->mutex); in cfi_amdstd_suspend()
3017 if (chip->state == FL_PM_SUSPENDED) { in cfi_amdstd_suspend()
3018 chip->state = chip->oldstate; in cfi_amdstd_suspend()
3019 wake_up(&chip->wq); in cfi_amdstd_suspend()
3021 mutex_unlock(&chip->mutex); in cfi_amdstd_suspend()
3031 struct map_info *map = mtd->priv; in cfi_amdstd_resume()
3032 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_resume()
3036 for (i=0; i<cfi->numchips; i++) { in cfi_amdstd_resume()
3038 chip = &cfi->chips[i]; in cfi_amdstd_resume()
3040 mutex_lock(&chip->mutex); in cfi_amdstd_resume()
3042 if (chip->state == FL_PM_SUSPENDED) { in cfi_amdstd_resume()
3043 chip->state = FL_READY; in cfi_amdstd_resume()
3044 map_write(map, CMD(0xF0), chip->start); in cfi_amdstd_resume()
3045 wake_up(&chip->wq); in cfi_amdstd_resume()
3050 mutex_unlock(&chip->mutex); in cfi_amdstd_resume()
3063 struct map_info *map = mtd->priv; in cfi_amdstd_reset()
3064 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_reset()
3068 for (i = 0; i < cfi->numchips; i++) { in cfi_amdstd_reset()
3070 chip = &cfi->chips[i]; in cfi_amdstd_reset()
3072 mutex_lock(&chip->mutex); in cfi_amdstd_reset()
3074 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); in cfi_amdstd_reset()
3076 map_write(map, CMD(0xF0), chip->start); in cfi_amdstd_reset()
3077 chip->state = FL_SHUTDOWN; in cfi_amdstd_reset()
3078 put_chip(map, chip, chip->start); in cfi_amdstd_reset()
3081 mutex_unlock(&chip->mutex); in cfi_amdstd_reset()
3101 struct map_info *map = mtd->priv; in cfi_amdstd_destroy()
3102 struct cfi_private *cfi = map->fldrv_priv; in cfi_amdstd_destroy()
3105 unregister_reboot_notifier(&mtd->reboot_notifier); in cfi_amdstd_destroy()
3106 kfree(cfi->cmdset_priv); in cfi_amdstd_destroy()
3107 kfree(cfi->cfiq); in cfi_amdstd_destroy()
3109 kfree(mtd->eraseregions); in cfi_amdstd_destroy()