Lines matching refs:ubi (references to the ubi device structure in the UBI wear-leveling code)
138 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
139 static int self_check_in_wl_tree(const struct ubi_device *ubi,
141 static int self_check_in_pq(const struct ubi_device *ubi,
188 static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e) in wl_entry_destroy() argument
190 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
201 static int do_work(struct ubi_device *ubi) in do_work() argument
214 down_read(&ubi->work_sem); in do_work()
215 spin_lock(&ubi->wl_lock); in do_work()
216 if (list_empty(&ubi->works)) { in do_work()
217 spin_unlock(&ubi->wl_lock); in do_work()
218 up_read(&ubi->work_sem); in do_work()
222 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
224 ubi->works_count -= 1; in do_work()
225 ubi_assert(ubi->works_count >= 0); in do_work()
226 spin_unlock(&ubi->wl_lock); in do_work()
233 err = wrk->func(ubi, wrk, 0); in do_work()
235 ubi_err(ubi, "work failed with error code %d", err); in do_work()
236 up_read(&ubi->work_sem); in do_work()
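A minimal user-space model of the dequeue-and-run pattern visible in the do_work() fragments above: pop the oldest pending item under the list lock, then run its callback with the lock dropped. The pthread mutex, struct names, and the sample work function are assumptions for illustration; the kernel pairs ubi->wl_lock with a read hold on ubi->work_sem here.

	/* sketch only: models the do_work() dequeue pattern, not kernel code */
	#include <pthread.h>
	#include <stdio.h>

	struct work {
		struct work *next;
		int (*func)(struct work *w, int shutdown);
	};

	static struct work *works;                    /* pending work, oldest first */
	static int works_count;
	static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;

	static int do_one_work(void)
	{
		struct work *wrk;
		int err;

		pthread_mutex_lock(&wl_lock);
		if (!works) {
			pthread_mutex_unlock(&wl_lock);
			return 0;                     /* nothing queued */
		}
		wrk = works;                          /* dequeue the oldest item */
		works = wrk->next;
		works_count -= 1;
		pthread_mutex_unlock(&wl_lock);

		err = wrk->func(wrk, 0);              /* run outside the lock */
		if (err)
			fprintf(stderr, "work failed with error code %d\n", err);
		return err;
	}

	static int sample_work(struct work *w, int shutdown)
	{
		(void)w; (void)shutdown;
		puts("an erase work item would run here");
		return 0;
	}

	int main(void)
	{
		struct work w = { .next = NULL, .func = sample_work };

		works = &w;
		works_count = 1;
		return do_one_work();
	}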
290 static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e) in prot_queue_add() argument
292 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
297 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
310 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi, in find_wl_entry() argument
337 if (prev_e && !ubi->fm_disabled && in find_wl_entry()
338 !ubi->fm && e->pnum < UBI_FM_MAX_START) in find_wl_entry()
353 static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi, in find_mean_wl_entry() argument
367 e = may_reserve_for_fm(ubi, e, root); in find_mean_wl_entry()
369 e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2); in find_mean_wl_entry()
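find_wl_entry()/find_mean_wl_entry() above do not hand out the least-worn free PEB; they pick the most-worn one whose erase counter is still within a margin of the minimum, which spreads allocations across similarly worn blocks. A sketch of that pick over a plain ascending array instead of the kernel's RB-tree; FREE_MAX_DIFF and the sample values are assumed stand-ins for WL_FREE_MAX_DIFF.

	/* sketch only: the margin-bounded pick from the free set */
	#include <stdio.h>
	#include <stddef.h>

	#define FREE_MAX_DIFF 64                      /* illustrative margin */

	/* ecs[] holds the erase counters of the free PEBs, sorted ascending */
	static size_t pick_free_index(const int *ecs, size_t n, int diff)
	{
		int limit = ecs[0] + diff;
		size_t i, best = 0;

		for (i = 1; i < n; i++)
			if (ecs[i] < limit)
				best = i;             /* highest EC still below the margin */
		return best;
	}

	int main(void)
	{
		int ecs[] = { 10, 12, 40, 200 };

		/* the WL_FREE_MAX_DIFF/2 call above corresponds to halving the margin */
		printf("picked index %zu\n", pick_free_index(ecs, 4, FREE_MAX_DIFF / 2));
		return 0;
	}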
382 static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi) in wl_get_wle() argument
386 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
388 ubi_err(ubi, "no free eraseblocks"); in wl_get_wle()
392 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
398 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
399 ubi->free_count--; in wl_get_wle()
413 static int prot_queue_del(struct ubi_device *ubi, int pnum) in prot_queue_del() argument
417 e = ubi->lookuptbl[pnum]; in prot_queue_del()
421 if (self_check_in_pq(ubi, e)) in prot_queue_del()
438 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in sync_erase() argument
447 err = self_check_ec(ubi, e->pnum, e->ec); in sync_erase()
451 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in sync_erase()
455 err = ubi_io_sync_erase(ubi, e->pnum, torture); in sync_erase()
465 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu", in sync_erase()
475 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in sync_erase()
480 spin_lock(&ubi->wl_lock); in sync_erase()
481 if (e->ec > ubi->max_ec) in sync_erase()
482 ubi->max_ec = e->ec; in sync_erase()
483 spin_unlock(&ubi->wl_lock); in sync_erase()
498 static void serve_prot_queue(struct ubi_device *ubi) in serve_prot_queue() argument
509 spin_lock(&ubi->wl_lock); in serve_prot_queue()
510 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
515 wl_tree_add(e, &ubi->used); in serve_prot_queue()
521 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
527 ubi->pq_head += 1; in serve_prot_queue()
528 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
529 ubi->pq_head = 0; in serve_prot_queue()
530 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
531 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
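The prot_queue_add() and serve_prot_queue() fragments above implement a countdown ring: a newly allocated PEB goes into the bucket just behind pq_head, and each serve pass releases the bucket at pq_head back to the used tree and advances the head. A sketch of that rotation; PROT_QUEUE_LEN mirrors UBI_PROT_QUEUE_LEN, while the int-array buckets and BUCKET_MAX are illustrative assumptions (the kernel chains ubi_wl_entry objects on list heads).

	/* sketch only: the protection-queue ring rotation */
	#include <stdio.h>

	#define PROT_QUEUE_LEN 10
	#define BUCKET_MAX     8

	static int pq[PROT_QUEUE_LEN][BUCKET_MAX];
	static int pq_len[PROT_QUEUE_LEN];
	static int pq_head;

	static void prot_queue_add(int pnum)
	{
		int tail = pq_head - 1;

		if (tail < 0)
			tail = PROT_QUEUE_LEN - 1;      /* wrap around the ring */
		pq[tail][pq_len[tail]++] = pnum;        /* protected for ~LEN serves */
	}

	static void serve_prot_queue(void)
	{
		int i;

		/* everything in the head bucket has aged out; release it */
		for (i = 0; i < pq_len[pq_head]; i++)
			printf("PEB %d moves back to the used tree\n", pq[pq_head][i]);
		pq_len[pq_head] = 0;

		if (++pq_head == PROT_QUEUE_LEN)
			pq_head = 0;
	}

	int main(void)
	{
		prot_queue_add(42);
		for (int i = 0; i < PROT_QUEUE_LEN; i++)
			serve_prot_queue();             /* PEB 42 is released on the last pass */
		return 0;
	}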
542 static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in __schedule_ubi_work() argument
544 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
545 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
546 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
547 ubi->works_count += 1; in __schedule_ubi_work()
548 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
549 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
550 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
561 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk) in schedule_ubi_work() argument
563 down_read(&ubi->work_sem); in schedule_ubi_work()
564 __schedule_ubi_work(ubi, wrk); in schedule_ubi_work()
565 up_read(&ubi->work_sem); in schedule_ubi_work()
568 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
582 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in schedule_erase() argument
603 __schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
605 schedule_ubi_work(ubi, wl_wrk); in schedule_erase()
609 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
619 static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, in do_sync_erase() argument
631 return __erase_worker(ubi, &wl_wrk); in do_sync_erase()
634 static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
646 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, argument
663 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
669 down_read(&ubi->fm_eba_sem);
670 mutex_lock(&ubi->move_mutex);
671 spin_lock(&ubi->wl_lock);
672 ubi_assert(!ubi->move_from && !ubi->move_to);
673 ubi_assert(!ubi->move_to_put);
675 if (!ubi->free.rb_node ||
676 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
688 !ubi->free.rb_node, !ubi->used.rb_node);
695 anchor = !anchor_pebs_available(&ubi->free);
698 e1 = find_anchor_wl_entry(&ubi->used);
701 e2 = get_peb_for_wl(ubi);
705 self_check_in_wl_tree(ubi, e1, &ubi->used);
706 rb_erase(&e1->u.rb, &ubi->used);
708 } else if (!ubi->scrub.rb_node) {
710 if (!ubi->scrub.rb_node) {
717 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
718 e2 = get_peb_for_wl(ubi);
727 wl_tree_add(e2, &ubi->free);
728 ubi->free_count++;
731 self_check_in_wl_tree(ubi, e1, &ubi->used);
732 rb_erase(&e1->u.rb, &ubi->used);
738 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
739 e2 = get_peb_for_wl(ubi);
743 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
744 rb_erase(&e1->u.rb, &ubi->scrub);
748 ubi->move_from = e1;
749 ubi->move_to = e2;
750 spin_unlock(&ubi->wl_lock);
763 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
790 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
802 ubi_err(ubi, "error %d while reading VID header from PEB %d",
810 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
848 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
849 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
850 ubi->erroneous_peb_count);
866 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
870 spin_lock(&ubi->wl_lock);
871 if (!ubi->move_to_put) {
872 wl_tree_add(e2, &ubi->used);
875 ubi->move_from = ubi->move_to = NULL;
876 ubi->move_to_put = ubi->wl_scheduled = 0;
877 spin_unlock(&ubi->wl_lock);
879 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
882 wl_entry_destroy(ubi, e2);
893 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
899 mutex_unlock(&ubi->move_mutex);
900 up_read(&ubi->fm_eba_sem);
915 spin_lock(&ubi->wl_lock);
917 prot_queue_add(ubi, e1);
919 wl_tree_add(e1, &ubi->erroneous);
920 ubi->erroneous_peb_count += 1;
922 wl_tree_add(e1, &ubi->scrub);
924 wl_tree_add(e1, &ubi->used);
926 wl_tree_add(e2, &ubi->free);
927 ubi->free_count++;
930 ubi_assert(!ubi->move_to_put);
931 ubi->move_from = ubi->move_to = NULL;
932 ubi->wl_scheduled = 0;
933 spin_unlock(&ubi->wl_lock);
937 ensure_wear_leveling(ubi, 1);
939 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
945 err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
950 mutex_unlock(&ubi->move_mutex);
951 up_read(&ubi->fm_eba_sem);
956 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
959 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
961 spin_lock(&ubi->wl_lock);
962 ubi->move_from = ubi->move_to = NULL;
963 ubi->move_to_put = ubi->wl_scheduled = 0;
964 spin_unlock(&ubi->wl_lock);
967 wl_entry_destroy(ubi, e1);
968 wl_entry_destroy(ubi, e2);
971 ubi_ro_mode(ubi);
972 mutex_unlock(&ubi->move_mutex);
973 up_read(&ubi->fm_eba_sem);
978 ubi->wl_scheduled = 0;
979 spin_unlock(&ubi->wl_lock);
980 mutex_unlock(&ubi->move_mutex);
981 up_read(&ubi->fm_eba_sem);
995 static int ensure_wear_leveling(struct ubi_device *ubi, int nested) argument
1002 spin_lock(&ubi->wl_lock);
1003 if (ubi->wl_scheduled)
1011 if (!ubi->scrub.rb_node) {
1012 if (!ubi->used.rb_node || !ubi->free.rb_node)
1022 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1023 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1031 ubi->wl_scheduled = 1;
1032 spin_unlock(&ubi->wl_lock);
1043 __schedule_ubi_work(ubi, wrk);
1045 schedule_ubi_work(ubi, wrk);
1049 spin_lock(&ubi->wl_lock);
1050 ubi->wl_scheduled = 0;
1052 spin_unlock(&ubi->wl_lock);
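The trigger check in ensure_wear_leveling() above, reduced to plain integers: take the lowest erase counter in the used tree and the candidate picked from the free tree, and only queue a wear_leveling_worker() move when the gap reaches a threshold. WL_THRESHOLD's value here is illustrative; the kernel uses the configurable UBI_WL_THRESHOLD.

	/* sketch only: when is a wear-leveling move worth scheduling? */
	#include <stdbool.h>
	#include <stdio.h>

	#define WL_THRESHOLD 4096

	static bool worth_moving(int used_min_ec, int free_candidate_ec)
	{
		/* moving data costs an extra erase, so only act on a significant gap */
		return free_candidate_ec - used_min_ec >= WL_THRESHOLD;
	}

	int main(void)
	{
		printf("%d\n", worth_moving(100, 5000));   /* 1: schedule the move */
		printf("%d\n", worth_moving(100, 300));    /* 0: leave things alone */
		return 0;
	}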
1068 static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) argument
1079 err = sync_erase(ubi, e, wl_wrk->torture);
1081 spin_lock(&ubi->wl_lock);
1082 wl_tree_add(e, &ubi->free);
1083 ubi->free_count++;
1084 spin_unlock(&ubi->wl_lock);
1090 serve_prot_queue(ubi);
1093 err = ensure_wear_leveling(ubi, 1);
1097 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1104 err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
1106 wl_entry_destroy(ubi, e);
1113 wl_entry_destroy(ubi, e);
1124 if (!ubi->bad_allowed) {
1125 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1129 spin_lock(&ubi->volumes_lock);
1130 if (ubi->beb_rsvd_pebs == 0) {
1131 if (ubi->avail_pebs == 0) {
1132 spin_unlock(&ubi->volumes_lock);
1133 ubi_err(ubi, "no reserved/available physical eraseblocks");
1136 ubi->avail_pebs -= 1;
1139 spin_unlock(&ubi->volumes_lock);
1141 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1142 err = ubi_io_mark_bad(ubi, pnum);
1146 spin_lock(&ubi->volumes_lock);
1147 if (ubi->beb_rsvd_pebs > 0) {
1153 ubi->avail_pebs += 1;
1156 ubi->beb_rsvd_pebs -= 1;
1158 ubi->bad_peb_count += 1;
1159 ubi->good_peb_count -= 1;
1160 ubi_calculate_reserved(ubi);
1162 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1163 else if (ubi->beb_rsvd_pebs)
1164 ubi_msg(ubi, "%d PEBs left in the reserve",
1165 ubi->beb_rsvd_pebs);
1167 ubi_warn(ubi, "last PEB from the reserve was used");
1168 spin_unlock(&ubi->volumes_lock);
1174 spin_lock(&ubi->volumes_lock);
1175 ubi->avail_pebs += 1;
1176 spin_unlock(&ubi->volumes_lock);
1178 ubi_ro_mode(ubi);
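Net effect of the reserve bookkeeping around ubi_io_mark_bad() in __erase_worker() above, reduced to plain arithmetic: a newly bad PEB is paid for out of the bad-eraseblock reserve, or out of the general available pool with a warning, and the device goes read-only once both are empty. The kernel consumes an available PEB provisionally before the mark and returns it if the reserve turns out non-empty afterwards; this sketch skips that locking detail and treats the counters as simple ints.

	/* sketch only: accounting when a PEB is marked bad */
	#include <stdio.h>

	struct peb_counts {
		int beb_rsvd_pebs;    /* PEBs reserved for bad-eraseblock handling */
		int avail_pebs;       /* general-purpose available PEBs */
		int bad_peb_count;
		int good_peb_count;
	};

	static int account_new_bad_peb(struct peb_counts *c)
	{
		if (c->beb_rsvd_pebs > 0) {
			c->beb_rsvd_pebs -= 1;         /* normal case: use the reserve */
		} else if (c->avail_pebs > 0) {
			c->avail_pebs -= 1;            /* reserve exhausted: borrow one */
			fprintf(stderr, "no PEBs in the reserved pool, used an available PEB\n");
		} else {
			return -1;                     /* nothing left: caller goes read-only */
		}
		c->bad_peb_count += 1;
		c->good_peb_count -= 1;
		return 0;
	}

	int main(void)
	{
		struct peb_counts c = { .beb_rsvd_pebs = 1, .avail_pebs = 3 };

		return account_new_bad_peb(&c) ? 1 : 0;
	}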
1182 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, argument
1192 wl_entry_destroy(ubi, e);
1196 ret = __erase_worker(ubi, wl_wrk);
1214 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum, argument
1222 ubi_assert(pnum < ubi->peb_count);
1224 down_read(&ubi->fm_protect);
1227 spin_lock(&ubi->wl_lock);
1228 e = ubi->lookuptbl[pnum];
1229 if (e == ubi->move_from) {
1236 spin_unlock(&ubi->wl_lock);
1239 mutex_lock(&ubi->move_mutex);
1240 mutex_unlock(&ubi->move_mutex);
1242 } else if (e == ubi->move_to) {
1253 ubi_assert(!ubi->move_to_put);
1254 ubi->move_to_put = 1;
1255 spin_unlock(&ubi->wl_lock);
1256 up_read(&ubi->fm_protect);
1259 if (in_wl_tree(e, &ubi->used)) {
1260 self_check_in_wl_tree(ubi, e, &ubi->used);
1261 rb_erase(&e->u.rb, &ubi->used);
1262 } else if (in_wl_tree(e, &ubi->scrub)) {
1263 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1264 rb_erase(&e->u.rb, &ubi->scrub);
1265 } else if (in_wl_tree(e, &ubi->erroneous)) {
1266 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1267 rb_erase(&e->u.rb, &ubi->erroneous);
1268 ubi->erroneous_peb_count -= 1;
1269 ubi_assert(ubi->erroneous_peb_count >= 0);
1273 err = prot_queue_del(ubi, e->pnum);
1275 ubi_err(ubi, "PEB %d not found", pnum);
1276 ubi_ro_mode(ubi);
1277 spin_unlock(&ubi->wl_lock);
1278 up_read(&ubi->fm_protect);
1283 spin_unlock(&ubi->wl_lock);
1285 err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
1287 spin_lock(&ubi->wl_lock);
1288 wl_tree_add(e, &ubi->used);
1289 spin_unlock(&ubi->wl_lock);
1292 up_read(&ubi->fm_protect);
1306 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum) argument
1310 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1313 spin_lock(&ubi->wl_lock);
1314 e = ubi->lookuptbl[pnum];
1315 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1316 in_wl_tree(e, &ubi->erroneous)) {
1317 spin_unlock(&ubi->wl_lock);
1321 if (e == ubi->move_to) {
1328 spin_unlock(&ubi->wl_lock);
1334 if (in_wl_tree(e, &ubi->used)) {
1335 self_check_in_wl_tree(ubi, e, &ubi->used);
1336 rb_erase(&e->u.rb, &ubi->used);
1340 err = prot_queue_del(ubi, e->pnum);
1342 ubi_err(ubi, "PEB %d not found", pnum);
1343 ubi_ro_mode(ubi);
1344 spin_unlock(&ubi->wl_lock);
1349 wl_tree_add(e, &ubi->scrub);
1350 spin_unlock(&ubi->wl_lock);
1356 return ensure_wear_leveling(ubi, 0);
1371 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum) argument
1381 vol_id, lnum, ubi->works_count);
1387 down_read(&ubi->work_sem);
1388 spin_lock(&ubi->wl_lock);
1389 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1393 ubi->works_count -= 1;
1394 ubi_assert(ubi->works_count >= 0);
1395 spin_unlock(&ubi->wl_lock);
1397 err = wrk->func(ubi, wrk, 0);
1399 up_read(&ubi->work_sem);
1403 spin_lock(&ubi->wl_lock);
1408 spin_unlock(&ubi->wl_lock);
1409 up_read(&ubi->work_sem);
1416 down_write(&ubi->work_sem);
1417 up_write(&ubi->work_sem);
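The down_write()/up_write() pair that ends ubi_wl_flush() above is a drain barrier: every running worker holds ubi->work_sem for read, so briefly taking it for write can only succeed once all in-flight work has finished. A minimal pthread model of that idiom, where the rwlock stands in for work_sem and the worker/waiter split is an assumption for illustration.

	/* sketch only: drain barrier via a reader-writer lock */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

	static void run_one_work(void (*fn)(void))
	{
		pthread_rwlock_rdlock(&work_sem);      /* held while the work runs */
		fn();
		pthread_rwlock_unlock(&work_sem);
	}

	static void wait_for_all_workers(void)
	{
		/* not granted until every reader (worker) has dropped the lock */
		pthread_rwlock_wrlock(&work_sem);
		pthread_rwlock_unlock(&work_sem);
	}

	static void sample_work(void)
	{
		puts("pretend erase");
	}

	int main(void)
	{
		run_one_work(sample_work);
		wait_for_all_workers();                /* all queued work has drained */
		return 0;
	}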
1427 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root) argument
1449 wl_entry_destroy(ubi, e);
1461 struct ubi_device *ubi = u; local
1463 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1464 ubi->bgt_name, task_pid_nr(current));
1476 spin_lock(&ubi->wl_lock);
1477 if (list_empty(&ubi->works) || ubi->ro_mode ||
1478 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1480 spin_unlock(&ubi->wl_lock);
1484 spin_unlock(&ubi->wl_lock);
1486 err = do_work(ubi);
1488 ubi_err(ubi, "%s: work failed with error code %d",
1489 ubi->bgt_name, err);
1495 ubi_msg(ubi, "%s: %d consecutive failures",
1496 ubi->bgt_name, WL_MAX_FAILURES);
1497 ubi_ro_mode(ubi);
1498 ubi->thread_enabled = 0;
1507 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1508 ubi->thread_enabled = 0;
1516 static void shutdown_work(struct ubi_device *ubi) argument
1518 while (!list_empty(&ubi->works)) {
1521 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1523 wrk->func(ubi, wrk, 1);
1524 ubi->works_count -= 1;
1525 ubi_assert(ubi->works_count >= 0);
1535 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync) argument
1546 ubi->lookuptbl[e->pnum] = e;
1549 err = sync_erase(ubi, e, false);
1553 wl_tree_add(e, &ubi->free);
1554 ubi->free_count++;
1556 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1564 wl_entry_destroy(ubi, e);
1577 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) argument
1585 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1586 spin_lock_init(&ubi->wl_lock);
1587 mutex_init(&ubi->move_mutex);
1588 init_rwsem(&ubi->work_sem);
1589 ubi->max_ec = ai->max_ec;
1590 INIT_LIST_HEAD(&ubi->works);
1592 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1595 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1596 if (!ubi->lookuptbl)
1600 INIT_LIST_HEAD(&ubi->pq[i]);
1601 ubi->pq_head = 0;
1603 ubi->free_count = 0;
1607 err = erase_aeb(ubi, aeb, false);
1627 wl_tree_add(e, &ubi->free);
1628 ubi->free_count++;
1630 ubi->lookuptbl[e->pnum] = e;
1647 ubi->lookuptbl[e->pnum] = e;
1652 wl_tree_add(e, &ubi->used);
1656 wl_tree_add(e, &ubi->scrub);
1666 e = ubi_find_fm_block(ubi, aeb->pnum);
1669 ubi_assert(!ubi->lookuptbl[e->pnum]);
1670 ubi->lookuptbl[e->pnum] = e;
1680 if (ubi->lookuptbl[aeb->pnum])
1695 err = erase_aeb(ubi, aeb, sync);
1705 ubi_assert(ubi->good_peb_count == found_pebs);
1708 ubi_fastmap_init(ubi, &reserved_pebs);
1710 if (ubi->avail_pebs < reserved_pebs) {
1711 ubi_err(ubi, "no enough physical eraseblocks (%d, need %d)",
1712 ubi->avail_pebs, reserved_pebs);
1713 if (ubi->corr_peb_count)
1714 ubi_err(ubi, "%d PEBs are corrupted and not used",
1715 ubi->corr_peb_count);
1719 ubi->avail_pebs -= reserved_pebs;
1720 ubi->rsvd_pebs += reserved_pebs;
1723 err = ensure_wear_leveling(ubi, 0);
1730 shutdown_work(ubi);
1731 tree_destroy(ubi, &ubi->used);
1732 tree_destroy(ubi, &ubi->free);
1733 tree_destroy(ubi, &ubi->scrub);
1734 kfree(ubi->lookuptbl);
1742 static void protection_queue_destroy(struct ubi_device *ubi) argument
1748 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1750 wl_entry_destroy(ubi, e);
1759 void ubi_wl_close(struct ubi_device *ubi) argument
1762 ubi_fastmap_close(ubi);
1763 shutdown_work(ubi);
1764 protection_queue_destroy(ubi);
1765 tree_destroy(ubi, &ubi->used);
1766 tree_destroy(ubi, &ubi->erroneous);
1767 tree_destroy(ubi, &ubi->free);
1768 tree_destroy(ubi, &ubi->scrub);
1769 kfree(ubi->lookuptbl);
1782 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec) argument
1788 if (!ubi_dbg_chk_gen(ubi))
1791 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1795 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1804 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1805 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1825 static int self_check_in_wl_tree(const struct ubi_device *ubi, argument
1828 if (!ubi_dbg_chk_gen(ubi))
1834 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
1848 static int self_check_in_pq(const struct ubi_device *ubi, argument
1854 if (!ubi_dbg_chk_gen(ubi))
1858 list_for_each_entry(p, &ubi->pq[i], u.list)
1862 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1868 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) argument
1872 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1873 self_check_in_wl_tree(ubi, e, &ubi->free);
1874 ubi->free_count--;
1875 ubi_assert(ubi->free_count >= 0);
1876 rb_erase(&e->u.rb, &ubi->free);
1890 static int produce_free_peb(struct ubi_device *ubi) argument
1894 while (!ubi->free.rb_node && ubi->works_count) {
1895 spin_unlock(&ubi->wl_lock);
1898 err = do_work(ubi);
1900 spin_lock(&ubi->wl_lock);
1916 int ubi_wl_get_peb(struct ubi_device *ubi) argument
1922 down_read(&ubi->fm_eba_sem);
1923 spin_lock(&ubi->wl_lock);
1924 if (!ubi->free.rb_node) {
1925 if (ubi->works_count == 0) {
1926 ubi_err(ubi, "no free eraseblocks");
1927 ubi_assert(list_empty(&ubi->works));
1928 spin_unlock(&ubi->wl_lock);
1932 err = produce_free_peb(ubi);
1934 spin_unlock(&ubi->wl_lock);
1937 spin_unlock(&ubi->wl_lock);
1938 up_read(&ubi->fm_eba_sem);
1942 e = wl_get_wle(ubi);
1943 prot_queue_add(ubi, e);
1944 spin_unlock(&ubi->wl_lock);
1946 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1947 ubi->peb_size - ubi->vid_hdr_aloffset);
1949 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
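The allocation loop in ubi_wl_get_peb()/produce_free_peb() above, as a user-space model: when the free tree is empty but work is queued, drop the lock, run one work item (a pending erase may refill the free pool), then retake the lock and retry; otherwise claim a PEB and protect it. The integer pools and the do_one_work() stub are assumptions for illustration.

	/* sketch only: retry loop that makes progress with the lock dropped */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;
	static int free_count;
	static int works_count;

	static int do_one_work(void)              /* stand-in for a queued erase */
	{
		works_count -= 1;
		free_count += 1;                  /* the erase returns a PEB to the pool */
		return 0;
	}

	static int wl_get_peb(void)
	{
		int err;

	retry:
		pthread_mutex_lock(&wl_lock);
		if (!free_count) {
			if (!works_count) {
				pthread_mutex_unlock(&wl_lock);
				return -1;        /* genuinely out of eraseblocks */
			}
			pthread_mutex_unlock(&wl_lock);
			err = do_one_work();      /* run with the lock dropped */
			if (err)
				return err;
			goto retry;
		}
		free_count -= 1;                  /* claim a PEB; the kernel also protects it via prot_queue_add() */
		pthread_mutex_unlock(&wl_lock);
		return 0;
	}

	int main(void)
	{
		works_count = 1;                  /* empty free pool, one pending erase */
		printf("%d\n", wl_get_peb());     /* prints 0: the erase produced a PEB */
		return 0;
	}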