Lines matching "+full:d +full:-" in drivers/mtd/mtdswap.c

1 // SPDX-License-Identifier: GPL-2.0-only
11  * support and lock-less operation written by Adrian Hunter.

59 #define BLOCK_ERROR (UINT_MAX - 1)
60 #define BLOCK_MAX (UINT_MAX - 2)

80                                  rb)->erase_count)
82                                  rb)->erase_count)
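/*
 * Lines 80 and 82 are evidently the continuation lines of the two-line
 * MTDSWAP_ECNT_MIN()/MTDSWAP_ECNT_MAX() macros used further down: they
 * take rb_first()/rb_last() of an erase-count-ordered rb-tree and read
 * back ->erase_count, i.e. the smallest and largest erase count in the
 * tree. Only the second line of each macro matched the search pattern.
 */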
153  * page from the current block and is then pre-empted by the GC

160 #define TREE_ROOT(d, name) (&d->trees[MTDSWAP_ ## name].root)
161 #define TREE_EMPTY(d, name) (TREE_ROOT(d, name)->rb_node == NULL)
162 #define TREE_NONEMPTY(d, name) (!TREE_EMPTY(d, name))
163 #define TREE_COUNT(d, name) (d->trees[MTDSWAP_ ## name].count)
165 #define MTDSWAP_MBD_TO_MTDSWAP(dev) ((struct mtdswap_dev *)dev->priv)
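/*
 * d->trees[] holds one rb-tree of erase blocks per state; the names
 * used with these macros below are CLEAN, USED, LOWFRAG, HIFRAG, DIRTY,
 * BITFLIP and FAILING. Each tree is ordered by erase count (see
 * __mtdswap_rb_add() below), which keeps wear-leveling queries cheap.
 */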
182 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background);
184 static loff_t mtdswap_eb_offset(struct mtdswap_dev *d, struct swap_eb *eb)
186         return (loff_t)(eb - d->eb_data) * d->mtd->erasesize;
189 static void mtdswap_eb_detach(struct mtdswap_dev *d, struct swap_eb *eb)
194         if (eb->root) {
195                 tp = container_of(eb->root, struct mtdswap_tree, root);
196                 oldidx = tp - &d->trees[0];
198                 d->trees[oldidx].count--;
199                 rb_erase(&eb->rb, eb->root);
208         p = &root->rb_node;
212                 if (eb->erase_count > cur->erase_count)
213                         p = &(*p)->rb_right;
215                         p = &(*p)->rb_left;
218         rb_link_node(&eb->rb, parent, p);
219         rb_insert_color(&eb->rb, root);
222 static void mtdswap_rb_add(struct mtdswap_dev *d, struct swap_eb *eb, int idx)
226         if (eb->root == &d->trees[idx].root)
229         mtdswap_eb_detach(d, eb);
230         root = &d->trees[idx].root;
232         eb->root = root;
233         d->trees[idx].count++;
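/*
 * mtdswap_rb_add() is the single point where a block changes state: it
 * detaches the block from whatever tree currently holds it, then
 * re-inserts it into trees[idx] and bumps that tree's count. The elided
 * line 231 is presumably the __mtdswap_rb_add(root, eb) call itself.
 */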
251 static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
256         d->spare_eblks--;
257         eb->flags |= EBLOCK_BAD;
258         mtdswap_eb_detach(d, eb);
259         eb->root = NULL;

262         if (!mtd_can_have_bb(d->mtd))

265         offset = mtdswap_eb_offset(d, eb);
266         dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
267         ret = mtd_block_markbad(d->mtd, offset);

270                 dev_warn(d->dev, "Mark block bad failed for block at %08llx "
271                         "error %d\n", offset, ret);
279 static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
281         unsigned int marked = eb->flags & EBLOCK_FAILED;
282         struct swap_eb *curr_write = d->curr_write;

284         eb->flags |= EBLOCK_FAILED;
286                 d->curr_write = NULL;

288                 if (!marked && d->curr_write_pos != 0) {
289                         mtdswap_rb_add(d, eb, MTDSWAP_FAILING);

294         return mtdswap_handle_badblock(d, eb);
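/*
 * Write-error policy: the first failure on a partially written current
 * block only moves it to the FAILING tree (its already-written pages
 * are still readable and will be rescued by GC); a repeated failure, or
 * a failure before anything was written, falls through to
 * mtdswap_handle_badblock().
 */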
297 static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
300         int ret = mtd_read_oob(d->mtd, from, ops);

306                 dev_warn(d->dev, "Read OOB failed %d for block at %08llx\n",

311         if (ops->oobretlen < ops->ooblen) {
312                 dev_warn(d->dev, "Read OOB returned short read (%zd bytes not "
314                         ops->oobretlen, ops->ooblen, from);
315                 return -EIO;
321 static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
328         offset = mtdswap_eb_offset(d, eb);

331         if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))

334         ops.ooblen = 2 * d->mtd->oobavail;
335         ops.oobbuf = d->oob_buf;
340         ret = mtdswap_read_oob(d, offset, &ops);

345         data = (struct mtdswap_oobdata *)d->oob_buf;
347                 (d->oob_buf + d->mtd->oobavail);

349         if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
350                 eb->erase_count = le32_to_cpu(data->count);
354                 if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)

360                 eb->flags |= EBLOCK_NOMAGIC;
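/*
 * On-flash state markers live in the OOB area: the first page's OOB
 * carries MTDSWAP_MAGIC_CLEAN plus the 32-bit erase count, and the
 * second page's OOB carries MTDSWAP_MAGIC_DIRTY once the block has been
 * handed out for writing. A block with neither magic is flagged
 * EBLOCK_NOMAGIC and is later given the median erase count by
 * mtdswap_check_counts().
 */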
367 static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
382                 n.count = cpu_to_le32(eb->erase_count);
384                 offset = mtdswap_eb_offset(d, eb);
388                 offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;

391         ret = mtd_write_oob(d->mtd, offset, &ops);

394                 dev_warn(d->dev, "Write OOB failed for block at %08llx "
395                         "error %d\n", offset, ret);
396                 if (ret == -EIO || mtd_is_eccerr(ret))
397                         mtdswap_handle_write_error(d, eb);

402                 dev_warn(d->dev, "Short OOB write for block at %08llx: "
416 static void mtdswap_check_counts(struct mtdswap_dev *d)

424         for (i = 0; i < d->eblks; i++) {
425                 eb = d->eb_data + i;

427                 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))

438         median = rb_entry(medrb, struct swap_eb, rb)->erase_count;

440         d->max_erase_count = MTDSWAP_ECNT_MAX(&hist_root);

442         for (i = 0; i < d->eblks; i++) {
443                 eb = d->eb_data + i;

445                 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_READERR))
446                         eb->erase_count = median;

448                 if (eb->flags & (EBLOCK_NOMAGIC | EBLOCK_BAD | EBLOCK_READERR))

451                 rb_erase(&eb->rb, &hist_root);
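/*
 * check_counts builds a temporary histogram tree (hist_root) of every
 * readable erase count, takes the median, and substitutes it for each
 * block whose count is unknown (no magic, or unreadable OOB), so that a
 * few unreadable blocks cannot skew the wear-leveling statistics.
 */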
455 static void mtdswap_scan_eblks(struct mtdswap_dev *d)

461         for (i = 0; i < d->eblks; i++) {
462                 eb = d->eb_data + i;

464                 status = mtdswap_read_markers(d, eb);
466                         eb->flags |= EBLOCK_READERR;
468                         eb->flags |= EBLOCK_BAD;

484                 eb->flags |= (idx << EBLOCK_IDX_SHIFT);

487         mtdswap_check_counts(d);

489         for (i = 0; i < d->eblks; i++) {
490                 eb = d->eb_data + i;

492                 if (eb->flags & EBLOCK_BAD)

495                 idx = eb->flags >> EBLOCK_IDX_SHIFT;
496                 mtdswap_rb_add(d, eb, idx);
504 static void mtdswap_store_eb(struct mtdswap_dev *d, struct swap_eb *eb)
506         unsigned int weight = eb->active_count;
507         unsigned int maxweight = d->pages_per_eblk;

509         if (eb == d->curr_write)

512         if (eb->flags & EBLOCK_BITFLIP)
513                 mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
514         else if (eb->flags & (EBLOCK_READERR | EBLOCK_FAILED))
515                 mtdswap_rb_add(d, eb, MTDSWAP_FAILING);
517                 mtdswap_rb_add(d, eb, MTDSWAP_USED);
519                 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
521                 mtdswap_rb_add(d, eb, MTDSWAP_LOWFRAG);
523                 mtdswap_rb_add(d, eb, MTDSWAP_HIFRAG);
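/*
 * The elided else-if conditions (lines 516-522) evidently compare
 * weight against maxweight: a fully mapped block goes to USED, a block
 * with no live pages to DIRTY, and partially live blocks to LOWFRAG or
 * HIFRAG depending on how much of them is still mapped. active_count is
 * thus the GC cost estimate: fewer live pages means less copying.
 */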
526 static int mtdswap_erase_block(struct mtdswap_dev *d, struct swap_eb *eb)
528         struct mtd_info *mtd = d->mtd;

533         eb->erase_count++;
534         if (eb->erase_count > d->max_erase_count)
535                 d->max_erase_count = eb->erase_count;

539         erase.addr = mtdswap_eb_offset(d, eb);
540         erase.len = mtd->erasesize;

545                         dev_warn(d->dev,
547                                 erase.addr, mtd->name);

552                 dev_err(d->dev, "Cannot erase erase block %#llx on %s\n",
553                         erase.addr, mtd->name);

555                 mtdswap_handle_badblock(d, eb);
556                 return -EIO;
562 static int mtdswap_map_free_block(struct mtdswap_dev *d, unsigned int page,
566         struct swap_eb *old_eb = d->curr_write;

570         if (old_eb == NULL || d->curr_write_pos >= d->pages_per_eblk) {

572                         if (TREE_EMPTY(d, CLEAN))
573                                 return -ENOSPC;

575                         clean_root = TREE_ROOT(d, CLEAN);
577                         rb_erase(&eb->rb, clean_root);
578                         eb->root = NULL;
579                         TREE_COUNT(d, CLEAN)--;

581                         ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
582                 } while (ret == -EIO || mtd_is_eccerr(ret));

587                 d->curr_write_pos = 0;
588                 d->curr_write = eb;
590                         mtdswap_store_eb(d, old_eb);

593         *block = (d->curr_write - d->eb_data) * d->pages_per_eblk +
594                  d->curr_write_pos;

596         d->curr_write->active_count++;
597         d->revmap[*block] = page;
598         d->curr_write_pos++;
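/*
 * Allocation policy: pages are handed out sequentially from the current
 * write block; when it is full, a block is popped off the CLEAN tree
 * (rb_first() on the elided line 576, i.e. the least-worn one) and
 * stamped with the DIRTY marker before use, retrying on marker-write
 * errors. *block is the global page-sized block number; revmap[] holds
 * its back-pointer to the swap page.
 */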
603 static unsigned int mtdswap_free_page_cnt(struct mtdswap_dev *d)
605         return TREE_COUNT(d, CLEAN) * d->pages_per_eblk +
606                d->pages_per_eblk - d->curr_write_pos;

609 static unsigned int mtdswap_enough_free_pages(struct mtdswap_dev *d)
611         return mtdswap_free_page_cnt(d) > d->pages_per_eblk;
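/*
 * Free pages = CLEAN blocks * pages_per_eblk plus whatever is left in
 * the current write block; e.g. with 64 pages per erase block, 3 clean
 * blocks and curr_write_pos == 10 that is 3 * 64 + 54 = 246 (numbers
 * are illustrative only). "Enough" means strictly more than one erase
 * block's worth, leaving headroom for GC to stage a whole block.
 */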
614 static int mtdswap_write_block(struct mtdswap_dev *d, char *buf,
617         struct mtd_info *mtd = d->mtd;

625         while (!mtdswap_enough_free_pages(d))
626                 if (mtdswap_gc(d, 0) > 0)
627                         return -ENOSPC;

629         ret = mtdswap_map_free_block(d, page, bp);
630         eb = d->eb_data + (*bp / d->pages_per_eblk);

632         if (ret == -EIO || mtd_is_eccerr(ret)) {
633                 d->curr_write = NULL;
634                 eb->active_count--;
635                 d->revmap[*bp] = PAGE_UNDEF;

644         if (ret == -EIO || mtd_is_eccerr(ret)) {
645                 d->curr_write_pos--;
646                 eb->active_count--;
647                 d->revmap[*bp] = PAGE_UNDEF;
648                 mtdswap_handle_write_error(d, eb);

653                 dev_err(d->dev, "Write to MTD device failed: %d (%zd written)",

659                 dev_err(d->dev, "Short write to MTD device: %zd written",
661                 ret = -EIO;

668         d->curr_write_pos--;
669         eb->active_count--;
670         d->revmap[*bp] = PAGE_UNDEF;
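/*
 * The repeated curr_write_pos--/active_count--/revmap[*bp] = PAGE_UNDEF
 * triplet backs a failed allocation out again. After
 * mtdswap_handle_write_error() has retired the bad block, the elided
 * control flow apparently retries the whole write on a fresh block.
 */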
675 static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
678         struct mtd_info *mtd = d->mtd;

685         page = d->revmap[oldblock];

690         ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);

693                 oldeb = d->eb_data + oldblock / d->pages_per_eblk;
694                 oldeb->flags |= EBLOCK_READERR;

696                 dev_err(d->dev, "Read Error: %d (block %u)\n", ret,

706                 dev_err(d->dev, "Short read: %zd (block %u)\n", retlen,
708                 ret = -EIO;

712         ret = mtdswap_write_block(d, d->page_buf, page, newblock, 1);

714                 d->page_data[page] = BLOCK_ERROR;
715                 dev_err(d->dev, "Write error: %d\n", ret);

719         d->page_data[page] = *newblock;
720         d->revmap[oldblock] = PAGE_UNDEF;
721         eb = d->eb_data + oldblock / d->pages_per_eblk;
722         eb->active_count--;

727         d->page_data[page] = BLOCK_ERROR;
728         d->revmap[oldblock] = PAGE_UNDEF;
732 static int mtdswap_gc_eblock(struct mtdswap_dev *d, struct swap_eb *eb)

738         eblk_base = (eb - d->eb_data) * d->pages_per_eblk;

740         for (i = 0; i < d->pages_per_eblk; i++) {
741                 if (d->spare_eblks < MIN_SPARE_EBLOCKS)
742                         return -ENOSPC;

745                 if (d->revmap[block] == PAGE_UNDEF)

748                 ret = mtdswap_move_block(d, block, &newblock);
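/*
 * GC of one erase block: walk its pages, skip the ones no longer mapped
 * (revmap == PAGE_UNDEF), and copy each live page elsewhere via
 * mtdswap_move_block(). It aborts with -ENOSPC if the spare-block
 * budget is exhausted mid-way.
 */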
756 static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d)

760         if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD)

765         for (idx = MTDSWAP_BITFLIP; idx >= stopat; idx--)
766                 if (d->trees[idx].root.rb_node != NULL)

769         return -1;
779          * to triangle with height f1 - f2 and width COLLECT_NONDIRTY_BASE.

782         dist = maxdiff - MAX_ERASE_DIFF;

788          * COLLECT_NONDIRTY_BASE and height freq1 - freq2. The ratio y/x is

791         h = COLLECT_NONDIRTY_FREQ1 - COLLECT_NONDIRTY_FREQ2;

794         x = dist - base;
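/*
 * mtdswap_wlfreq() models a linear ramp (a right triangle of width
 * COLLECT_NONDIRTY_BASE and height FREQ1 - FREQ2, per its comments):
 * the wear-leveling pick period moves from COLLECT_NONDIRTY_FREQ1
 * toward COLLECT_NONDIRTY_FREQ2 as the erase-count gap beyond
 * MAX_ERASE_DIFF approaches COLLECT_NONDIRTY_BASE, with y/x = h/base
 * giving the interpolated offset.
 */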
800 static int mtdswap_choose_wl_tree(struct mtdswap_dev *d)
803         unsigned int i, idx = -1, wear, max;

808                 root = &d->trees[i].root;
809                 if (root->rb_node == NULL)

812                 wear = d->max_erase_count - MTDSWAP_ECNT_MIN(root);

819         if (max > MAX_ERASE_DIFF && pick_cnt >= mtdswap_wlfreq(max) - 1) {

825         return -1;
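/*
 * Wear-leveling trigger: for each tree, "wear" is how far its
 * least-worn block lags behind the device-wide maximum erase count.
 * Only when the worst gap exceeds MAX_ERASE_DIFF, and only once every
 * mtdswap_wlfreq(max) picks (rate-limited via pick_cnt), is a tree
 * forced into collection; otherwise -1 defers to the normal GC choice.
 */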
828 static int mtdswap_choose_gc_tree(struct mtdswap_dev *d,

833         if (TREE_NONEMPTY(d, FAILING) &&
834             (background || (TREE_EMPTY(d, CLEAN) && TREE_EMPTY(d, DIRTY))))

837         idx = mtdswap_choose_wl_tree(d);

841         return __mtdswap_choose_gc_tree(d);
844 static struct swap_eb *mtdswap_pick_gc_eblk(struct mtdswap_dev *d,

851         if (background && TREE_COUNT(d, CLEAN) > CLEAN_BLOCK_THRESHOLD &&
852             TREE_EMPTY(d, DIRTY) && TREE_EMPTY(d, FAILING))

855         idx = mtdswap_choose_gc_tree(d, background);

859         rp = &d->trees[idx].root;

862         rb_erase(&eb->rb, rp);
863         eb->root = NULL;
864         d->trees[idx].count--;
873 static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
876         struct mtd_info *mtd = d->mtd;

879         unsigned int *p1 = (unsigned int *)d->page_buf;
880         unsigned char *p2 = (unsigned char *)d->oob_buf;

885         ops.len = mtd->writesize;
886         ops.ooblen = mtd->oobavail;

888         ops.datbuf = d->page_buf;
889         ops.oobbuf = d->oob_buf;
890         base = mtdswap_eb_offset(d, eb);
891         mtd_pages = d->pages_per_eblk * PAGE_SIZE / mtd->writesize;

897                         memset(d->page_buf, patt, mtd->writesize);
898                         memset(d->oob_buf, patt, mtd->oobavail);

903                         pos += mtd->writesize;

913                         for (j = 0; j < mtd->writesize / sizeof(int); j++)

917                         for (j = 0; j < mtd->oobavail; j++)

921                         pos += mtd->writesize;

924                 ret = mtdswap_erase_block(d, eb);

929         eb->flags &= ~EBLOCK_READERR;

933         mtdswap_handle_badblock(d, eb);
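/*
 * Torture test for a block that produced read errors: fill every page
 * (data and OOB) with a test pattern, read it back and verify it word
 * by word through p1/p2, erase, and repeat with a second pattern. Only
 * a block that survives both passes has EBLOCK_READERR cleared; any
 * mismatch or I/O error sends it to mtdswap_handle_badblock().
 */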
937 static int mtdswap_gc(struct mtdswap_dev *d, unsigned int background)

942         if (d->spare_eblks < MIN_SPARE_EBLOCKS)

945         eb = mtdswap_pick_gc_eblk(d, background);

949         ret = mtdswap_gc_eblock(d, eb);
950         if (ret == -ENOSPC)

953         if (eb->flags & EBLOCK_FAILED) {
954                 mtdswap_handle_badblock(d, eb);

958         eb->flags &= ~EBLOCK_BITFLIP;
959         ret = mtdswap_erase_block(d, eb);
960         if ((eb->flags & EBLOCK_READERR) &&
961             (ret || !mtdswap_eblk_passes(d, eb)))

965         ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_CLEAN);
968                 mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
969         else if (ret != -EIO && !mtd_is_eccerr(ret))
970                 mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
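/*
 * Full GC cycle: pick a victim block, migrate its live pages, retire it
 * if it had failed writes, otherwise erase it (torture-testing blocks
 * with prior read errors) and return it to the CLEAN tree with a fresh
 * marker; a non-fatal marker-write error files it under DIRTY instead.
 */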
977         struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

981                 ret = mtdswap_gc(d, 1);
987 static void mtdswap_cleanup(struct mtdswap_dev *d)
989         vfree(d->eb_data);
990         vfree(d->revmap);
991         vfree(d->page_data);
992         kfree(d->oob_buf);
993         kfree(d->page_buf);
998         struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

1000        mtd_sync(d->mtd);

1012        for (offset = 0; offset < size; offset += mtd->erasesize)
1022        struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

1027        d->sect_write_count++;

1029        if (d->spare_eblks < MIN_SPARE_EBLOCKS)
1030                return -ENOSPC;

1037                page--;

1040        mapped = d->page_data[page];
1042                eb = d->eb_data + (mapped / d->pages_per_eblk);
1043                eb->active_count--;
1044                mtdswap_store_eb(d, eb);
1045                d->page_data[page] = BLOCK_UNDEF;
1046                d->revmap[mapped] = PAGE_UNDEF;

1049        ret = mtdswap_write_block(d, buf, page, &newblock, 0);
1050        d->mtd_write_count++;

1055        d->page_data[page] = newblock;
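/*
 * Write path: if the sector was already mapped, its old block is
 * released first (active_count--, re-filed by mtdswap_store_eb(), both
 * mapping arrays cleared), then the page is written to a fresh block
 * and page_data[] is repointed. The page-- on line 1037 offsets for
 * page 0, which is reserved for the generated swap header below.
 */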
1061 static int mtdswap_auto_header(struct mtdswap_dev *d, char *buf)

1065        memset(buf, 0, PAGE_SIZE - 10);

1067        hd->info.version = 1;
1068        hd->info.last_page = d->mbd_dev->size - 1;
1069        hd->info.nr_badpages = 0;

1071        memcpy(buf + PAGE_SIZE - 10, "SWAPSPACE2", 10);
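/*
 * Synthesizes a version-1 Linux swap header on the fly for reads of the
 * first page: a zeroed page with the header fields filled in and the
 * 10-byte "SWAPSPACE2" signature in the last bytes, so the device can
 * be swapon'd without running mkswap on it first.
 */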
1079        struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
1080        struct mtd_info *mtd = d->mtd;

1087        d->sect_read_count++;

1091                        return mtdswap_auto_header(d, buf);

1093                page--;

1096        realblock = d->page_data[page];

1102                        return -EIO;

1105        eb = d->eb_data + (realblock / d->pages_per_eblk);
1106        BUG_ON(d->revmap[realblock] == PAGE_UNDEF);

1114        d->mtd_read_count++;

1116                eb->flags |= EBLOCK_BITFLIP;
1117                mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);

1122                dev_err(d->dev, "Read error %d\n", ret);
1123                eb->flags |= EBLOCK_READERR;
1124                mtdswap_rb_add(d, eb, MTDSWAP_FAILING);

1133                dev_err(d->dev, "Short read %zd\n", retlen);
1134                return -EIO;
1143        struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

1148        d->discard_count++;

1151                mapped = d->page_data[page];
1153                        eb = d->eb_data + (mapped / d->pages_per_eblk);
1154                        eb->active_count--;
1155                        mtdswap_store_eb(d, eb);
1156                        d->page_data[page] = BLOCK_UNDEF;
1157                        d->revmap[mapped] = PAGE_UNDEF;
1158                        d->discard_page_count++;
1160                        d->page_data[page] = BLOCK_UNDEF;
1161                        d->discard_page_count++;
1170        struct mtdswap_dev *d = (struct mtdswap_dev *)s->private;

1181        mutex_lock(&d->mbd_dev->lock);

1184                struct rb_root *root = &d->trees[i].root;

1186                if (root->rb_node) {
1187                        count[i] = d->trees[i].count;

1194        if (d->curr_write) {
1196                cwp = d->curr_write_pos;
1197                cwecount = d->curr_write->erase_count;

1201        for (i = 0; i < d->eblks; i++)
1202                sum += d->eb_data[i].erase_count;

1204        use_size = (uint64_t)d->eblks * d->mtd->erasesize;
1205        bb_cnt = mtdswap_badblocks(d->mtd, use_size);

1208        pages = d->mbd_dev->size;
1210                if (d->page_data[i] != BLOCK_UNDEF)

1213        mutex_unlock(&d->mbd_dev->lock);

1220                        seq_printf(s, "%s:\t%5d erase blocks, erased min %d, "
1221                                "max %d times\n",

1224                        seq_printf(s, "%s:\t%5d erase blocks, all erased %d "

1234                cwp, d->pages_per_eblk - cwp, cwecount);

1240        seq_printf(s, "mtdswap_readsect count: %llu\n", d->sect_read_count);
1241        seq_printf(s, "mtdswap_writesect count: %llu\n", d->sect_write_count);
1242        seq_printf(s, "mtdswap_discard count: %llu\n", d->discard_count);
1243        seq_printf(s, "mtd read count: %llu\n", d->mtd_read_count);
1244        seq_printf(s, "mtd write count: %llu\n", d->mtd_write_count);
1245        seq_printf(s, "discarded pages count: %llu\n", d->discard_page_count);
1255 static int mtdswap_add_debugfs(struct mtdswap_dev *d)
1257        struct dentry *root = d->mtd->dbg.dfs_dir;

1263                return -1;

1265        debugfs_create_file("mtdswap_stats", S_IRUSR, root, d, &mtdswap_fops);
1270 static int mtdswap_init(struct mtdswap_dev *d, unsigned int eblocks,
1273        struct mtd_info *mtd = d->mbd_dev->mtd;
1275        int ret = -ENOMEM;

1277        d->mtd = mtd;
1278        d->eblks = eblocks;
1279        d->spare_eblks = spare_cnt;
1280        d->pages_per_eblk = mtd->erasesize >> PAGE_SHIFT;

1282        pages = d->mbd_dev->size;
1283        blocks = eblocks * d->pages_per_eblk;

1286                d->trees[i].root = RB_ROOT;

1288        d->page_data = vmalloc(array_size(pages, sizeof(int)));
1289        if (!d->page_data)

1292        d->revmap = vmalloc(array_size(blocks, sizeof(int)));
1293        if (!d->revmap)

1296        eblk_bytes = sizeof(struct swap_eb) * d->eblks;
1297        d->eb_data = vzalloc(eblk_bytes);
1298        if (!d->eb_data)

1302                d->page_data[i] = BLOCK_UNDEF;

1305                d->revmap[i] = PAGE_UNDEF;

1307        d->page_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
1308        if (!d->page_buf)

1311        d->oob_buf = kmalloc_array(2, mtd->oobavail, GFP_KERNEL);
1312        if (!d->oob_buf)

1315        mtdswap_scan_eblks(d);

1320        kfree(d->page_buf);
1322        vfree(d->eb_data);
1324        vfree(d->revmap);
1326        vfree(d->page_data);
1328        printk(KERN_ERR "%s: init failed (%d)\n", MTDSWAP_PREFIX, ret);
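/*
 * The two big vmalloc'd arrays are the heart of the translation layer:
 * page_data[swap page] -> flash block (or BLOCK_UNDEF/BLOCK_ERROR) and
 * revmap[flash block] -> swap page (or PAGE_UNDEF). oob_buf is sized
 * for two pages' worth of OOB because mtdswap_read_markers() fetches
 * the CLEAN and DIRTY markers in a single 2 * oobavail read.
 */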
1334        struct mtdswap_dev *d;

1351                if (mtd->index == part)

1355        if (mtd->index != part)

1358        if (mtd->erasesize < PAGE_SIZE || mtd->erasesize % PAGE_SIZE) {
1360                        "%lu\n", MTDSWAP_PREFIX, mtd->erasesize, PAGE_SIZE);

1364        if (PAGE_SIZE % mtd->writesize || mtd->writesize > PAGE_SIZE) {
1366                        " %u\n", MTDSWAP_PREFIX, PAGE_SIZE, mtd->writesize);

1370        if (!mtd->oobsize || mtd->oobavail < MTDSWAP_OOBSIZE) {
1372                        "%d available, %zu needed.\n",
1373                        MTDSWAP_PREFIX, mtd->oobavail, MTDSWAP_OOBSIZE);

1380        use_size = mtd->size;

1383        if (mtd->size > size_limit) {

1390        use_size = (uint64_t)eblocks * mtd->erasesize;

1392        eavailable = eblocks - bad_blocks;

1396                        "%d needed\n", MTDSWAP_PREFIX, eavailable,

1406        if (spare_cnt > eavailable - 1)
1407                spare_cnt = eavailable - 1;

1409        swap_size = (uint64_t)(eavailable - spare_cnt) * mtd->erasesize +

1416        d = kzalloc(sizeof(struct mtdswap_dev), GFP_KERNEL);
1417        if (!d)

1422                kfree(d);

1426        d->mbd_dev = mbd_dev;
1427        mbd_dev->priv = d;

1429        mbd_dev->mtd = mtd;
1430        mbd_dev->devnum = mtd->index;
1431        mbd_dev->size = swap_size >> PAGE_SHIFT;
1432        mbd_dev->tr = tr;

1434        if (!(mtd->flags & MTD_WRITEABLE))
1435                mbd_dev->readonly = 1;

1437        if (mtdswap_init(d, eblocks, spare_cnt) < 0)

1443        d->dev = disk_to_dev(mbd_dev->disk);

1445        ret = mtdswap_add_debugfs(d);

1455        mtdswap_cleanup(d);

1459        kfree(d);
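/*
 * Geometry sanity checks before the device is accepted: the erase size
 * must be a PAGE_SIZE multiple, the write size must divide PAGE_SIZE,
 * and enough OOB bytes (MTDSWAP_OOBSIZE) must be available for the
 * markers. swap_size is then derived from the available (non-bad) erase
 * blocks minus the spare count, and exported in PAGE_SIZE sectors.
 */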
1464        struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);

1467        mtdswap_cleanup(d);
1468        kfree(d);