1 /*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21 /*
22 * The UBI Eraseblock Association (EBA) sub-system.
23 *
24 * This sub-system is responsible for I/O to/from logical eraseblocks.
25 *
26 * Although in this implementation the EBA table is fully kept and managed in
27 * RAM, which implies poor scalability, it might be (partially) maintained on
28 * flash in future implementations.
29 *
30 * The EBA sub-system implements per-logical eraseblock locking. Before
31 * accessing a logical eraseblock it is locked for reading or writing. The
32 * per-logical eraseblock locking is implemented by means of the lock tree. The
33 * lock tree is an RB-tree which refers to all the currently locked logical
34 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
35 * They are indexed by (@vol_id, @lnum) pairs.
36 *
37 * EBA also maintains the global sequence counter which is incremented each
38 * time a logical eraseblock is mapped to a physical eraseblock and it is
39 * stored in the volume identifier header. This means that each VID header has
40 * a unique sequence number. The sequence number is only increased and we assume
41 * 64 bits is enough to never overflow.
42 */
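
/*
 * Rough sketch (editorial, simplified) of how a read flows through the
 * helpers defined below; the real ubi_eba_read_leb() adds CRC checking,
 * bitflip scrubbing and error handling on top of this:
 *
 *	leb_read_lock(ubi, vol_id, lnum);
 *	pnum = vol->eba_tbl->entries[lnum].pnum;
 *	if (pnum >= 0)
 *		err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	leb_read_unlock(ubi, vol_id, lnum);
 */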
43
44 #include <linux/slab.h>
45 #include <linux/crc32.h>
46 #include <linux/err.h>
47 #include "ubi.h"
48
49 /* Number of physical eraseblocks reserved for atomic LEB change operation */
50 #define EBA_RESERVED_PEBS 1
51
52 /**
53 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
54 * @pnum: the physical eraseblock number attached to the LEB
55 *
56 * This structure encodes a LEB -> PEB association. Note that the LEB
57 * number is not stored here, because it is the index used to access the
58 * entries table.
59 */
60 struct ubi_eba_entry {
61 int pnum;
62 };
63
64 /**
65 * struct ubi_eba_table - LEB -> PEB association information
66 * @entries: the LEB to PEB mapping (one entry per LEB).
67 *
68 * This structure is private to the EBA logic and should be kept here.
69 * It encodes the LEB to PEB association table, and is subject to
70 * changes.
71 */
72 struct ubi_eba_table {
73 struct ubi_eba_entry *entries;
74 };
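
/*
 * For reference: the mapping for LEB @lnum of a volume is simply
 * vol->eba_tbl->entries[lnum].pnum; a negative value (%UBI_LEB_UNMAPPED)
 * means the LEB is currently not mapped to any PEB.
 */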
75
76 /**
77 * ubi_next_sqnum - get next sequence number.
78 * @ubi: UBI device description object
79 *
80 * This function returns next sequence number to use, which is just the current
81 * global sequence counter value. It also increases the global sequence
82 * counter.
83 */
84 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
85 {
86 unsigned long long sqnum;
87
88 spin_lock(&ubi->ltree_lock);
89 sqnum = ubi->global_sqnum++;
90 spin_unlock(&ubi->ltree_lock);
91
92 return sqnum;
93 }
94
95 /**
96 * ubi_get_compat - get compatibility flags of a volume.
97 * @ubi: UBI device description object
98 * @vol_id: volume ID
99 *
100 * This function returns compatibility flags for an internal volume. User
101 * volumes have no compatibility flags, so %0 is returned.
102 */
103 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
104 {
105 if (vol_id == UBI_LAYOUT_VOLUME_ID)
106 return UBI_LAYOUT_VOLUME_COMPAT;
107 return 0;
108 }
109
110 /**
111 * ubi_eba_get_ldesc - get information about a LEB
112 * @vol: volume description object
113 * @lnum: logical eraseblock number
114 * @ldesc: the LEB descriptor to fill
115 *
116 * Used to query information about a specific LEB.
117 * It currently only returns the physical position of the LEB, but will be
118 * extended to provide more information.
119 */
120 void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
121 struct ubi_eba_leb_desc *ldesc)
122 {
123 ldesc->lnum = lnum;
124 ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
125 }
126
127 /**
128 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
129 * LEBs unmapped
130 * @vol: volume to create the EBA table for
131 * @nentries: number of entries in the table
132 *
133 * Allocate a new EBA table and initialize it with all LEBs unmapped.
134 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
135 */
136 struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
137 int nentries)
138 {
139 struct ubi_eba_table *tbl;
140 int err = -ENOMEM;
141 int i;
142
143 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
144 if (!tbl)
145 return ERR_PTR(-ENOMEM);
146
147 tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
148 GFP_KERNEL);
149 if (!tbl->entries)
150 goto err;
151
152 for (i = 0; i < nentries; i++)
153 tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
154
155 return tbl;
156
157 err:
158 kfree(tbl->entries);
159 kfree(tbl);
160
161 return ERR_PTR(err);
162 }
163
164 /**
165 * ubi_eba_destroy_table - destroy an EBA table
166 * @tbl: the table to destroy
167 *
168 * Destroy an EBA table.
169 */
170 void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
171 {
172 if (!tbl)
173 return;
174
175 kfree(tbl->entries);
176 kfree(tbl);
177 }
178
179 /**
180 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
181 * @vol: volume containing the EBA table to copy
182 * @dst: destination
183 * @nentries: number of entries to copy
184 *
185 * Copy the EBA table stored in vol into the one pointed by dst.
186 */
187 void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
188 int nentries)
189 {
190 struct ubi_eba_table *src;
191 int i;
192
193 ubi_assert(dst && vol && vol->eba_tbl);
194
195 src = vol->eba_tbl;
196
197 for (i = 0; i < nentries; i++)
198 dst->entries[i].pnum = src->entries[i].pnum;
199 }
200
201 /**
202 * ubi_eba_replace_table - assign a new EBA table to a volume
203 * @vol: volume to assign the new EBA table to
204 * @tbl: new EBA table
205 *
206 * Assign a new EBA table to the volume and release the old one.
207 */
208 void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
209 {
210 ubi_eba_destroy_table(vol->eba_tbl);
211 vol->eba_tbl = tbl;
212 }
213
214 /**
215 * ltree_lookup - look up the lock tree.
216 * @ubi: UBI device description object
217 * @vol_id: volume ID
218 * @lnum: logical eraseblock number
219 *
220 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
221 * object if the logical eraseblock is locked and %NULL if it is not.
222 * @ubi->ltree_lock has to be locked.
223 */
224 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
225 int lnum)
226 {
227 struct rb_node *p;
228
229 p = ubi->ltree.rb_node;
230 while (p) {
231 struct ubi_ltree_entry *le;
232
233 le = rb_entry(p, struct ubi_ltree_entry, rb);
234
235 if (vol_id < le->vol_id)
236 p = p->rb_left;
237 else if (vol_id > le->vol_id)
238 p = p->rb_right;
239 else {
240 if (lnum < le->lnum)
241 p = p->rb_left;
242 else if (lnum > le->lnum)
243 p = p->rb_right;
244 else
245 return le;
246 }
247 }
248
249 return NULL;
250 }
251
252 /**
253 * ltree_add_entry - add new entry to the lock tree.
254 * @ubi: UBI device description object
255 * @vol_id: volume ID
256 * @lnum: logical eraseblock number
257 *
258 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to the
259 * lock tree. If such entry is already there, its usage counter is increased.
260 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
261 * failed.
262 */
263 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
264 int vol_id, int lnum)
265 {
266 struct ubi_ltree_entry *le, *le1, *le_free;
267
268 le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
269 if (!le)
270 return ERR_PTR(-ENOMEM);
271
272 le->users = 0;
273 init_rwsem(&le->mutex);
274 le->vol_id = vol_id;
275 le->lnum = lnum;
276
277 spin_lock(&ubi->ltree_lock);
278 le1 = ltree_lookup(ubi, vol_id, lnum);
279
280 if (le1) {
281 /*
282 * This logical eraseblock is already locked. The newly
283 * allocated lock entry is not needed.
284 */
285 le_free = le;
286 le = le1;
287 } else {
288 struct rb_node **p, *parent = NULL;
289
290 /*
291 * No lock entry, add the newly allocated one to the
292 * @ubi->ltree RB-tree.
293 */
294 le_free = NULL;
295
296 p = &ubi->ltree.rb_node;
297 while (*p) {
298 parent = *p;
299 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
300
301 if (vol_id < le1->vol_id)
302 p = &(*p)->rb_left;
303 else if (vol_id > le1->vol_id)
304 p = &(*p)->rb_right;
305 else {
306 ubi_assert(lnum != le1->lnum);
307 if (lnum < le1->lnum)
308 p = &(*p)->rb_left;
309 else
310 p = &(*p)->rb_right;
311 }
312 }
313
314 rb_link_node(&le->rb, parent, p);
315 rb_insert_color(&le->rb, &ubi->ltree);
316 }
317 le->users += 1;
318 spin_unlock(&ubi->ltree_lock);
319
320 kfree(le_free);
321 return le;
322 }
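
/*
 * The lock helpers below pair ltree_add_entry(), which takes a reference on
 * the lock tree entry, with down_read()/down_write() on @le->mutex. The
 * matching unlock helpers drop the reference and free the entry once
 * @le->users reaches zero.
 */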
323
324 /**
325 * leb_read_lock - lock logical eraseblock for reading.
326 * @ubi: UBI device description object
327 * @vol_id: volume ID
328 * @lnum: logical eraseblock number
329 *
330 * This function locks a logical eraseblock for reading. Returns zero in case
331 * of success and a negative error code in case of failure.
332 */
333 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
334 {
335 struct ubi_ltree_entry *le;
336
337 le = ltree_add_entry(ubi, vol_id, lnum);
338 if (IS_ERR(le))
339 return PTR_ERR(le);
340 down_read(&le->mutex);
341 return 0;
342 }
343
344 /**
345 * leb_read_unlock - unlock logical eraseblock.
346 * @ubi: UBI device description object
347 * @vol_id: volume ID
348 * @lnum: logical eraseblock number
349 */
350 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
351 {
352 struct ubi_ltree_entry *le;
353
354 spin_lock(&ubi->ltree_lock);
355 le = ltree_lookup(ubi, vol_id, lnum);
356 le->users -= 1;
357 ubi_assert(le->users >= 0);
358 up_read(&le->mutex);
359 if (le->users == 0) {
360 rb_erase(&le->rb, &ubi->ltree);
361 kfree(le);
362 }
363 spin_unlock(&ubi->ltree_lock);
364 }
365
366 /**
367 * leb_write_lock - lock logical eraseblock for writing.
368 * @ubi: UBI device description object
369 * @vol_id: volume ID
370 * @lnum: logical eraseblock number
371 *
372 * This function locks a logical eraseblock for writing. Returns zero in case
373 * of success and a negative error code in case of failure.
374 */
375 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
376 {
377 struct ubi_ltree_entry *le;
378
379 le = ltree_add_entry(ubi, vol_id, lnum);
380 if (IS_ERR(le))
381 return PTR_ERR(le);
382 down_write(&le->mutex);
383 return 0;
384 }
385
386 /**
387 * leb_write_trylock - try to lock logical eraseblock for writing.
388 * @ubi: UBI device description object
389 * @vol_id: volume ID
390 * @lnum: logical eraseblock number
391 *
392 * This function locks a logical eraseblock for writing if there is no
393 * contention and does nothing if there is contention. Returns %0 in case of
394 * success, %1 in case of contention, and a negative error code in case of
395 * failure.
396 */
397 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
398 {
399 struct ubi_ltree_entry *le;
400
401 le = ltree_add_entry(ubi, vol_id, lnum);
402 if (IS_ERR(le))
403 return PTR_ERR(le);
404 if (down_write_trylock(&le->mutex))
405 return 0;
406
407 /* Contention, cancel */
408 spin_lock(&ubi->ltree_lock);
409 le->users -= 1;
410 ubi_assert(le->users >= 0);
411 if (le->users == 0) {
412 rb_erase(&le->rb, &ubi->ltree);
413 kfree(le);
414 }
415 spin_unlock(&ubi->ltree_lock);
416
417 return 1;
418 }
419
420 /**
421 * leb_write_unlock - unlock logical eraseblock.
422 * @ubi: UBI device description object
423 * @vol_id: volume ID
424 * @lnum: logical eraseblock number
425 */
426 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
427 {
428 struct ubi_ltree_entry *le;
429
430 spin_lock(&ubi->ltree_lock);
431 le = ltree_lookup(ubi, vol_id, lnum);
432 le->users -= 1;
433 ubi_assert(le->users >= 0);
434 up_write(&le->mutex);
435 if (le->users == 0) {
436 rb_erase(&le->rb, &ubi->ltree);
437 kfree(le);
438 }
439 spin_unlock(&ubi->ltree_lock);
440 }
441
442 /**
443 * ubi_eba_is_mapped - check if a LEB is mapped.
444 * @vol: volume description object
445 * @lnum: logical eraseblock number
446 *
447 * This function returns true if the LEB is mapped, false otherwise.
448 */
449 bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
450 {
451 return vol->eba_tbl->entries[lnum].pnum >= 0;
452 }
453
454 /**
455 * ubi_eba_unmap_leb - un-map logical eraseblock.
456 * @ubi: UBI device description object
457 * @vol: volume description object
458 * @lnum: logical eraseblock number
459 *
460 * This function un-maps logical eraseblock @lnum and schedules corresponding
461 * physical eraseblock for erasure. Returns zero in case of success and a
462 * negative error code in case of failure.
463 */
464 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
465 int lnum)
466 {
467 int err, pnum, vol_id = vol->vol_id;
468
469 if (ubi->ro_mode)
470 return -EROFS;
471
472 err = leb_write_lock(ubi, vol_id, lnum);
473 if (err)
474 return err;
475
476 pnum = vol->eba_tbl->entries[lnum].pnum;
477 if (pnum < 0)
478 /* This logical eraseblock is already unmapped */
479 goto out_unlock;
480
481 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
482
483 down_read(&ubi->fm_eba_sem);
484 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
485 up_read(&ubi->fm_eba_sem);
486 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
487
488 out_unlock:
489 leb_write_unlock(ubi, vol_id, lnum);
490 return err;
491 }
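
/*
 * Note that un-mapping only schedules the old PEB for erasure; a subsequent
 * read of the LEB returns 0xFF bytes (see ubi_eba_read_leb() below), so
 * callers must not expect the old data to survive an unmap.
 */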
492
493 #ifdef CONFIG_MTD_UBI_FASTMAP
494 /**
495 * check_mapping - check and fixup a mapping
496 * @ubi: UBI device description object
497 * @vol: volume description object
498 * @lnum: logical eraseblock number
499 * @pnum: physical eraseblock number
500 *
501 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
502 * operations; if such an operation is interrupted the mapping still looks
503 * good, but upon first read an ECC error is reported to the upper layer.
504 * Normally this is fixed during the full scan at attach time; for Fastmap
505 * we have to deal with it while reading.
506 * If the PEB behind a LEB shows this symptom we change the mapping to
507 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
508 *
509 * Returns 0 on success, negative error code in case of failure.
510 */
511 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
512 int *pnum)
513 {
514 int err;
515 struct ubi_vid_io_buf *vidb;
516 struct ubi_vid_hdr *vid_hdr;
517
518 if (!ubi->fast_attach)
519 return 0;
520
521 if (!vol->checkmap || test_bit(lnum, vol->checkmap))
522 return 0;
523
524 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
525 if (!vidb)
526 return -ENOMEM;
527
528 err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
529 if (err > 0 && err != UBI_IO_BITFLIPS) {
530 int torture = 0;
531
532 switch (err) {
533 case UBI_IO_FF:
534 case UBI_IO_FF_BITFLIPS:
535 case UBI_IO_BAD_HDR:
536 case UBI_IO_BAD_HDR_EBADMSG:
537 break;
538 default:
539 ubi_assert(0);
540 }
541
542 if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
543 torture = 1;
544
545 down_read(&ubi->fm_eba_sem);
546 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
547 up_read(&ubi->fm_eba_sem);
548 ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
549
550 *pnum = UBI_LEB_UNMAPPED;
551 } else if (err < 0) {
552 ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
553 *pnum, err);
554
555 goto out_free;
556 } else {
557 int found_vol_id, found_lnum;
558
559 ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);
560
561 vid_hdr = ubi_get_vid_hdr(vidb);
562 found_vol_id = be32_to_cpu(vid_hdr->vol_id);
563 found_lnum = be32_to_cpu(vid_hdr->lnum);
564
565 if (found_lnum != lnum || found_vol_id != vol->vol_id) {
566 ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
567 *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
568 ubi_ro_mode(ubi);
569 err = -EINVAL;
570 goto out_free;
571 }
572 }
573
574 set_bit(lnum, vol->checkmap);
575 err = 0;
576
577 out_free:
578 ubi_free_vid_buf(vidb);
579
580 return err;
581 }
582 #else
583 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
584 int *pnum)
585 {
586 return 0;
587 }
588 #endif
589
590 /**
591 * ubi_eba_read_leb - read data.
592 * @ubi: UBI device description object
593 * @vol: volume description object
594 * @lnum: logical eraseblock number
595 * @buf: buffer to store the read data
596 * @offset: offset from where to read
597 * @len: how many bytes to read
598 * @check: data CRC check flag
599 *
600 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
601 * bytes. The @check flag only makes sense for static volumes and forces
602 * eraseblock data CRC checking.
603 *
604 * In case of success this function returns zero. In case of a static volume,
605 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
606 * returned for any volume type if an ECC error was detected by the MTD device
607 * driver. Other negative error codes may be returned in case of other errors.
608 */
609 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
610 void *buf, int offset, int len, int check)
611 {
612 int err, pnum, scrub = 0, vol_id = vol->vol_id;
613 struct ubi_vid_io_buf *vidb;
614 struct ubi_vid_hdr *vid_hdr;
615 uint32_t uninitialized_var(crc);
616
617 err = leb_read_lock(ubi, vol_id, lnum);
618 if (err)
619 return err;
620
621 pnum = vol->eba_tbl->entries[lnum].pnum;
622 if (pnum >= 0) {
623 err = check_mapping(ubi, vol, lnum, &pnum);
624 if (err < 0)
625 goto out_unlock;
626 }
627
628 if (pnum == UBI_LEB_UNMAPPED) {
629 /*
630 * The logical eraseblock is not mapped, fill the whole buffer
631 * with 0xFF bytes. The exception is static volumes for which
632 * it is an error to read unmapped logical eraseblocks.
633 */
634 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
635 len, offset, vol_id, lnum);
636 leb_read_unlock(ubi, vol_id, lnum);
637 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
638 memset(buf, 0xFF, len);
639 return 0;
640 }
641
642 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
643 len, offset, vol_id, lnum, pnum);
644
645 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
646 check = 0;
647
648 retry:
649 if (check) {
650 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
651 if (!vidb) {
652 err = -ENOMEM;
653 goto out_unlock;
654 }
655
656 vid_hdr = ubi_get_vid_hdr(vidb);
657
658 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
659 if (err && err != UBI_IO_BITFLIPS) {
660 if (err > 0) {
661 /*
662 * The header is either absent or corrupted.
663 * The former case means there is a bug -
664 * switch to read-only mode just in case.
665 * The latter case means a real corruption - we
666 * may try to recover data. FIXME: but this is
667 * not implemented.
668 */
669 if (err == UBI_IO_BAD_HDR_EBADMSG ||
670 err == UBI_IO_BAD_HDR) {
671 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
672 pnum, vol_id, lnum);
673 err = -EBADMSG;
674 } else {
675 /*
676 * Ending up here in the non-Fastmap case
677 * is a clear bug as the VID header had to
678 * be present at scan time to have it referenced.
679 * With fastmap the story is more complicated.
680 * Fastmap has the mapping info without the need
681 * of a full scan. So the LEB could have been
682 * unmapped, Fastmap cannot know this and keeps
683 * the LEB referenced.
684 * This is valid and works as the layer above UBI
685 * has to do bookkeeping about used/referenced
686 * LEBs in any case.
687 */
688 if (ubi->fast_attach) {
689 err = -EBADMSG;
690 } else {
691 err = -EINVAL;
692 ubi_ro_mode(ubi);
693 }
694 }
695 }
696 goto out_free;
697 } else if (err == UBI_IO_BITFLIPS)
698 scrub = 1;
699
700 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
701 ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
702
703 crc = be32_to_cpu(vid_hdr->data_crc);
704 ubi_free_vid_buf(vidb);
705 }
706
707 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
708 if (err) {
709 if (err == UBI_IO_BITFLIPS)
710 scrub = 1;
711 else if (mtd_is_eccerr(err)) {
712 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
713 goto out_unlock;
714 scrub = 1;
715 if (!check) {
716 ubi_msg(ubi, "force data checking");
717 check = 1;
718 goto retry;
719 }
720 } else
721 goto out_unlock;
722 }
723
724 if (check) {
725 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
726 if (crc1 != crc) {
727 ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
728 crc1, crc);
729 err = -EBADMSG;
730 goto out_unlock;
731 }
732 }
733
734 if (scrub)
735 err = ubi_wl_scrub_peb(ubi, pnum);
736
737 leb_read_unlock(ubi, vol_id, lnum);
738 return err;
739
740 out_free:
741 ubi_free_vid_buf(vidb);
742 out_unlock:
743 leb_read_unlock(ubi, vol_id, lnum);
744 return err;
745 }
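
/*
 * Illustrative caller (simplified; in the kernel the ubi_leb_read() wrapper
 * in kapi.c performs the argument checking before calling this function):
 *
 *	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, 0);
 *	if (err == -EBADMSG)
 *		handle_corrupted_data();
 *	else if (err)
 *		return err;
 *
 * where handle_corrupted_data() stands in for whatever policy the caller
 * applies to corrupted data.
 */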
746
747 /**
748 * ubi_eba_read_leb_sg - read data into a scatter gather list.
749 * @ubi: UBI device description object
750 * @vol: volume description object
751 * @sgl: UBI scatter gather list to store the read data
752 * @lnum: logical eraseblock number
753 * @offset: offset from where to read
754 * @len: how many bytes to read
755 * @check: data CRC check flag
756 *
757 * This function works exactly like ubi_eba_read_leb(), but instead of
758 * storing the read data in a single buffer it stores it in an UBI scatter
759 * gather list.
760 */
761 int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
762 struct ubi_sgl *sgl, int lnum, int offset, int len,
763 int check)
764 {
765 int to_read;
766 int ret;
767 struct scatterlist *sg;
768
769 for (;;) {
770 ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
771 sg = &sgl->sg[sgl->list_pos];
772 if (len < sg->length - sgl->page_pos)
773 to_read = len;
774 else
775 to_read = sg->length - sgl->page_pos;
776
777 ret = ubi_eba_read_leb(ubi, vol, lnum,
778 sg_virt(sg) + sgl->page_pos, offset,
779 to_read, check);
780 if (ret < 0)
781 return ret;
782
783 offset += to_read;
784 len -= to_read;
785 if (!len) {
786 sgl->page_pos += to_read;
787 if (sgl->page_pos == sg->length) {
788 sgl->list_pos++;
789 sgl->page_pos = 0;
790 }
791
792 break;
793 }
794
795 sgl->list_pos++;
796 sgl->page_pos = 0;
797 }
798
799 return ret;
800 }
801
802 /**
803 * try_recover_peb - try to recover from write failure.
804 * @vol: volume description object
805 * @pnum: the physical eraseblock to recover
806 * @lnum: logical eraseblock number
807 * @buf: data which was not written because of the write failure
808 * @offset: offset of the failed write
809 * @len: how many bytes should have been written
810 * @vidb: VID buffer
811 * @retry: whether the caller should retry in case of failure
812 *
813 * This function is called in case of a write failure and moves all good data
814 * from the potentially bad physical eraseblock to a good physical eraseblock.
815 * This function also writes the data which was not written due to the failure.
816 * Returns 0 in case of success, and a negative error code in case of failure.
817 * In case of failure, the %retry parameter is set to false if this is a fatal
818 * error (retrying won't help), and true otherwise.
819 */
820 static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
821 const void *buf, int offset, int len,
822 struct ubi_vid_io_buf *vidb, bool *retry)
823 {
824 struct ubi_device *ubi = vol->ubi;
825 struct ubi_vid_hdr *vid_hdr;
826 int new_pnum, err, vol_id = vol->vol_id, data_size;
827 uint32_t crc;
828
829 *retry = false;
830
831 new_pnum = ubi_wl_get_peb(ubi);
832 if (new_pnum < 0) {
833 err = new_pnum;
834 goto out_put;
835 }
836
837 ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
838 pnum, new_pnum);
839
840 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
841 if (err && err != UBI_IO_BITFLIPS) {
842 if (err > 0)
843 err = -EIO;
844 goto out_put;
845 }
846
847 vid_hdr = ubi_get_vid_hdr(vidb);
848 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
849
850 mutex_lock(&ubi->buf_mutex);
851 memset(ubi->peb_buf + offset, 0xFF, len);
852
853 /* Read everything before the area where the write failure happened */
854 if (offset > 0) {
855 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
856 if (err && err != UBI_IO_BITFLIPS)
857 goto out_unlock;
858 }
859
860 *retry = true;
861
862 memcpy(ubi->peb_buf + offset, buf, len);
863
864 data_size = offset + len;
865 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
866 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
867 vid_hdr->copy_flag = 1;
868 vid_hdr->data_size = cpu_to_be32(data_size);
869 vid_hdr->data_crc = cpu_to_be32(crc);
870 err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
871 if (err)
872 goto out_unlock;
873
874 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
875
876 out_unlock:
877 mutex_unlock(&ubi->buf_mutex);
878
879 if (!err)
880 vol->eba_tbl->entries[lnum].pnum = new_pnum;
881
882 out_put:
883 up_read(&ubi->fm_eba_sem);
884
885 if (!err) {
886 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
887 ubi_msg(ubi, "data was successfully recovered");
888 } else if (new_pnum >= 0) {
889 /*
890 * Bad luck? This physical eraseblock is bad too? Crud. Let's
891 * try to get another one.
892 */
893 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
894 ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
895 }
896
897 return err;
898 }
899
900 /**
901 * recover_peb - recover from write failure.
902 * @ubi: UBI device description object
903 * @pnum: the physical eraseblock to recover
904 * @vol_id: volume ID
905 * @lnum: logical eraseblock number
906 * @buf: data which was not written because of the write failure
907 * @offset: offset of the failed write
908 * @len: how many bytes should have been written
909 *
910 * This function is called in case of a write failure and moves all good data
911 * from the potentially bad physical eraseblock to a good physical eraseblock.
912 * This function also writes the data which was not written due to the failure.
913 * Returns 0 in case of success, and a negative error code in case of failure.
914 * This function tries %UBI_IO_RETRIES times before giving up.
915 */
916 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
917 const void *buf, int offset, int len)
918 {
919 int err, idx = vol_id2idx(ubi, vol_id), tries;
920 struct ubi_volume *vol = ubi->volumes[idx];
921 struct ubi_vid_io_buf *vidb;
922
923 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
924 if (!vidb)
925 return -ENOMEM;
926
927 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
928 bool retry;
929
930 err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
931 &retry);
932 if (!err || !retry)
933 break;
934
935 ubi_msg(ubi, "try again");
936 }
937
938 ubi_free_vid_buf(vidb);
939
940 return err;
941 }
942
943 /**
944 * try_write_vid_and_data - try to write VID header and data to a new PEB.
945 * @vol: volume description object
946 * @lnum: logical eraseblock number
947 * @vidb: the VID buffer to write
948 * @buf: buffer containing the data
949 * @offset: where to start writing data
950 * @len: how many bytes should be written
951 *
952 * This function tries to write VID header and data belonging to logical
953 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
954 * in case of success and a negative error code in case of failure.
955 * In case of error, it is possible that something was still written to the
956 * flash media, but it may be garbage.
957 */
958 static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
959 struct ubi_vid_io_buf *vidb, const void *buf,
960 int offset, int len)
961 {
962 struct ubi_device *ubi = vol->ubi;
963 int pnum, opnum, err, vol_id = vol->vol_id;
964
965 pnum = ubi_wl_get_peb(ubi);
966 if (pnum < 0) {
967 err = pnum;
968 goto out_put;
969 }
970
971 opnum = vol->eba_tbl->entries[lnum].pnum;
972
973 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
974 len, offset, vol_id, lnum, pnum);
975
976 err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
977 if (err) {
978 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
979 vol_id, lnum, pnum);
980 goto out_put;
981 }
982
983 if (len) {
984 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
985 if (err) {
986 ubi_warn(ubi,
987 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
988 len, offset, vol_id, lnum, pnum);
989 goto out_put;
990 }
991 }
992
993 vol->eba_tbl->entries[lnum].pnum = pnum;
994
995 out_put:
996 up_read(&ubi->fm_eba_sem);
997
998 if (err && pnum >= 0)
999 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
1000 else if (!err && opnum >= 0)
1001 err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
1002
1003 return err;
1004 }
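
/*
 * All three write paths below (ubi_eba_write_leb(), ubi_eba_write_leb_st()
 * and ubi_eba_atomic_leb_change()) funnel writes to unmapped LEBs through
 * try_write_vid_and_data() using the same retry pattern:
 *
 *	for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
 *		err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
 *		if (err != -EIO || !ubi->bad_allowed)
 *			break;
 *		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 *	}
 */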
1005
1006 /**
1007 * ubi_eba_write_leb - write data to dynamic volume.
1008 * @ubi: UBI device description object
1009 * @vol: volume description object
1010 * @lnum: logical eraseblock number
1011 * @buf: the data to write
1012 * @offset: offset within the logical eraseblock where to write
1013 * @len: how many bytes to write
1014 *
1015 * This function writes data to logical eraseblock @lnum of a dynamic volume
1016 * @vol. Returns zero in case of success and a negative error code in case
1017 * of failure. In case of error, it is possible that something was still
1018 * written to the flash media, but it may be garbage.
1019 * This function retries %UBI_IO_RETRIES times before giving up.
1020 */
1021 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1022 const void *buf, int offset, int len)
1023 {
1024 int err, pnum, tries, vol_id = vol->vol_id;
1025 struct ubi_vid_io_buf *vidb;
1026 struct ubi_vid_hdr *vid_hdr;
1027
1028 if (ubi->ro_mode)
1029 return -EROFS;
1030
1031 err = leb_write_lock(ubi, vol_id, lnum);
1032 if (err)
1033 return err;
1034
1035 pnum = vol->eba_tbl->entries[lnum].pnum;
1036 if (pnum >= 0) {
1037 err = check_mapping(ubi, vol, lnum, &pnum);
1038 if (err < 0)
1039 goto out;
1040 }
1041
1042 if (pnum >= 0) {
1043 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
1044 len, offset, vol_id, lnum, pnum);
1045
1046 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
1047 if (err) {
1048 ubi_warn(ubi, "failed to write data to PEB %d", pnum);
1049 if (err == -EIO && ubi->bad_allowed)
1050 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
1051 offset, len);
1052 }
1053
1054 goto out;
1055 }
1056
1057 /*
1058 * The logical eraseblock is not mapped. We have to get a free physical
1059 * eraseblock and write the volume identifier header there first.
1060 */
1061 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1062 if (!vidb) {
1063 leb_write_unlock(ubi, vol_id, lnum);
1064 return -ENOMEM;
1065 }
1066
1067 vid_hdr = ubi_get_vid_hdr(vidb);
1068
1069 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1070 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1071 vid_hdr->vol_id = cpu_to_be32(vol_id);
1072 vid_hdr->lnum = cpu_to_be32(lnum);
1073 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1074 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1075
1076 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1077 err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
1078 if (err != -EIO || !ubi->bad_allowed)
1079 break;
1080
1081 /*
1082 * Fortunately, this is the first write operation to this
1083 * physical eraseblock, so just put it and request a new one.
1084 * We assume that if this physical eraseblock went bad, the
1085 * erase code will handle that.
1086 */
1087 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1088 ubi_msg(ubi, "try another PEB");
1089 }
1090
1091 ubi_free_vid_buf(vidb);
1092
1093 out:
1094 if (err)
1095 ubi_ro_mode(ubi);
1096
1097 leb_write_unlock(ubi, vol_id, lnum);
1098
1099 return err;
1100 }
1101
1102 /**
1103 * ubi_eba_write_leb_st - write data to static volume.
1104 * @ubi: UBI device description object
1105 * @vol: volume description object
1106 * @lnum: logical eraseblock number
1107 * @buf: data to write
1108 * @len: how many bytes to write
1109 * @used_ebs: how many logical eraseblocks will this volume contain
1110 *
1111 * This function writes data to logical eraseblock @lnum of static volume
1112 * @vol. The @used_ebs argument should contain the total number of logical
1113 * eraseblocks in this static volume.
1114 *
1115 * When writing to the last logical eraseblock, the @len argument doesn't have
1116 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
1117 * to the real data size, although the @buf buffer has to contain the
1118 * alignment. In all other cases, @len has to be aligned.
1119 *
1120 * It is prohibited to write more than once to logical eraseblocks of static
1121 * volumes. This function returns zero in case of success and a negative error
1122 * code in case of failure.
1123 */
1124 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
1125 int lnum, const void *buf, int len, int used_ebs)
1126 {
1127 int err, tries, data_size = len, vol_id = vol->vol_id;
1128 struct ubi_vid_io_buf *vidb;
1129 struct ubi_vid_hdr *vid_hdr;
1130 uint32_t crc;
1131
1132 if (ubi->ro_mode)
1133 return -EROFS;
1134
1135 if (lnum == used_ebs - 1)
1136 /* If this is the last LEB @len may be unaligned */
1137 len = ALIGN(data_size, ubi->min_io_size);
1138 else
1139 ubi_assert(!(len & (ubi->min_io_size - 1)));
1140
1141 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1142 if (!vidb)
1143 return -ENOMEM;
1144
1145 vid_hdr = ubi_get_vid_hdr(vidb);
1146
1147 err = leb_write_lock(ubi, vol_id, lnum);
1148 if (err)
1149 goto out;
1150
1151 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1152 vid_hdr->vol_id = cpu_to_be32(vol_id);
1153 vid_hdr->lnum = cpu_to_be32(lnum);
1154 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1155 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1156
1157 crc = crc32(UBI_CRC32_INIT, buf, data_size);
1158 vid_hdr->vol_type = UBI_VID_STATIC;
1159 vid_hdr->data_size = cpu_to_be32(data_size);
1160 vid_hdr->used_ebs = cpu_to_be32(used_ebs);
1161 vid_hdr->data_crc = cpu_to_be32(crc);
1162
1163 ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
1164
1165 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1166 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1167 if (err != -EIO || !ubi->bad_allowed)
1168 break;
1169
1170 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1171 ubi_msg(ubi, "try another PEB");
1172 }
1173
1174 if (err)
1175 ubi_ro_mode(ubi);
1176
1177 leb_write_unlock(ubi, vol_id, lnum);
1178
1179 out:
1180 ubi_free_vid_buf(vidb);
1181
1182 return err;
1183 }
1184
1185 /**
1186 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
1187 * @ubi: UBI device description object
1188 * @vol: volume description object
1189 * @lnum: logical eraseblock number
1190 * @buf: data to write
1191 * @len: how many bytes to write
1192 *
1193 * This function changes the contents of a logical eraseblock atomically. @buf
1194 * has to contain new logical eraseblock data, and @len - the length of the
1195 * data, which has to be aligned. This function guarantees that in case of an
1196 * unclean reboot the old contents are preserved. Returns zero in case of
1197 * success and a negative error code in case of failure.
1198 *
1199 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
1200 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
1201 */
1202 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
1203 int lnum, const void *buf, int len)
1204 {
1205 int err, tries, vol_id = vol->vol_id;
1206 struct ubi_vid_io_buf *vidb;
1207 struct ubi_vid_hdr *vid_hdr;
1208 uint32_t crc;
1209
1210 if (ubi->ro_mode)
1211 return -EROFS;
1212
1213 if (len == 0) {
1214 /*
1215 * Special case when data length is zero. In this case the LEB
1216 * has to be unmapped and mapped somewhere else.
1217 */
1218 err = ubi_eba_unmap_leb(ubi, vol, lnum);
1219 if (err)
1220 return err;
1221 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
1222 }
1223
1224 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1225 if (!vidb)
1226 return -ENOMEM;
1227
1228 vid_hdr = ubi_get_vid_hdr(vidb);
1229
1230 mutex_lock(&ubi->alc_mutex);
1231 err = leb_write_lock(ubi, vol_id, lnum);
1232 if (err)
1233 goto out_mutex;
1234
1235 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1236 vid_hdr->vol_id = cpu_to_be32(vol_id);
1237 vid_hdr->lnum = cpu_to_be32(lnum);
1238 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1239 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1240
1241 crc = crc32(UBI_CRC32_INIT, buf, len);
1242 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1243 vid_hdr->data_size = cpu_to_be32(len);
1244 vid_hdr->copy_flag = 1;
1245 vid_hdr->data_crc = cpu_to_be32(crc);
1246
1247 dbg_eba("change LEB %d:%d", vol_id, lnum);
1248
1249 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1250 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1251 if (err != -EIO || !ubi->bad_allowed)
1252 break;
1253
1254 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1255 ubi_msg(ubi, "try another PEB");
1256 }
1257
1258 /*
1259 * This flash device does not admit of bad eraseblocks or
1260 * something nasty and unexpected happened. Switch to read-only
1261 * mode just in case.
1262 */
1263 if (err)
1264 ubi_ro_mode(ubi);
1265
1266 leb_write_unlock(ubi, vol_id, lnum);
1267
1268 out_mutex:
1269 mutex_unlock(&ubi->alc_mutex);
1270 ubi_free_vid_buf(vidb);
1271 return err;
1272 }
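
/*
 * Illustrative caller (simplified; in the kernel the ubi_leb_change()
 * wrapper in kapi.c is the usual entry point):
 *
 *	err = ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 *
 * with @len aligned as described above. If an unclean reboot interrupts the
 * operation, the old contents of the LEB are still seen afterwards.
 */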
1273
1274 /**
1275 * is_error_sane - check whether a read error is sane.
1276 * @err: code of the error happened during reading
1277 *
1278 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
1279 * cannot read data from the target PEB (an error @err happened). If the error
1280 * code is sane, then we treat this error as non-fatal. Otherwise the error is
1281 * fatal and UBI will be switched to R/O mode later.
1282 *
1283 * The idea is that we try not to switch to R/O mode if the read error is
1284 * something which suggests there was a real read problem. E.g., %-EIO. Or a
1285 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
1286 * mode, simply because we do not know what happened at the MTD level, and we
1287 * cannot handle this. E.g., the underlying driver may have become crazy, and
1288 * it is safer to switch to R/O mode to preserve the data.
1289 *
1290 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
1291 * which we have just written.
1292 */
1293 static int is_error_sane(int err)
1294 {
1295 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
1296 err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
1297 return 0;
1298 return 1;
1299 }
1300
1301 /**
1302 * ubi_eba_copy_leb - copy logical eraseblock.
1303 * @ubi: UBI device description object
1304 * @from: physical eraseblock number from where to copy
1305 * @to: physical eraseblock number where to copy
1306 * @vidb: VID buffer holding the VID header of the @from physical eraseblock
1307 *
1308 * This function copies logical eraseblock from physical eraseblock @from to
1309 * physical eraseblock @to. The VID header in @vidb may be changed by this
1310 * function. Returns:
1311 * o %0 in case of success;
1312 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
1313 * o a negative error code in case of failure.
1314 */
1315 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1316 struct ubi_vid_io_buf *vidb)
1317 {
1318 int err, vol_id, lnum, data_size, aldata_size, idx;
1319 struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
1320 struct ubi_volume *vol;
1321 uint32_t crc;
1322
1323 ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
1324
1325 vol_id = be32_to_cpu(vid_hdr->vol_id);
1326 lnum = be32_to_cpu(vid_hdr->lnum);
1327
1328 dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1329
1330 if (vid_hdr->vol_type == UBI_VID_STATIC) {
1331 data_size = be32_to_cpu(vid_hdr->data_size);
1332 aldata_size = ALIGN(data_size, ubi->min_io_size);
1333 } else
1334 data_size = aldata_size =
1335 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1336
1337 idx = vol_id2idx(ubi, vol_id);
1338 spin_lock(&ubi->volumes_lock);
1339 /*
1340 * Note, we may race with volume deletion, which means that the volume
1341 * this logical eraseblock belongs to might be being deleted. Since the
1342 * volume deletion un-maps all the volume's logical eraseblocks, it will
1343 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1344 */
1345 vol = ubi->volumes[idx];
1346 spin_unlock(&ubi->volumes_lock);
1347 if (!vol) {
1348 /* No need to do further work, cancel */
1349 dbg_wl("volume %d is being removed, cancel", vol_id);
1350 return MOVE_CANCEL_RACE;
1351 }
1352
1353 /*
1354 * We do not want anybody to write to this logical eraseblock while we
1355 * are moving it, so lock it.
1356 *
1357 * Note, we are using non-waiting locking here, because we cannot sleep
1358 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1359 * unmapping the LEB which is mapped to the PEB we are going to move
1360 * (@from). This task locks the LEB and goes sleep in the
1361 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1362 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1363 * LEB is already locked, we just do not move it and return
1364 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1365 * we do not know the reasons of the contention - it may be just a
1366 * normal I/O on this LEB, so we want to re-try.
1367 */
1368 err = leb_write_trylock(ubi, vol_id, lnum);
1369 if (err) {
1370 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1371 return MOVE_RETRY;
1372 }
1373
1374 /*
1375 * The LEB might have been put meanwhile, and the task which put it is
1376 * probably waiting on @ubi->move_mutex. No need to continue the work,
1377 * cancel it.
1378 */
1379 if (vol->eba_tbl->entries[lnum].pnum != from) {
1380 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1381 vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
1382 err = MOVE_CANCEL_RACE;
1383 goto out_unlock_leb;
1384 }
1385
1386 /*
1387 * OK, now the LEB is locked and we can safely start moving it. Since
1388 * this function utilizes the @ubi->peb_buf buffer which is shared
1389 * with some other functions - we lock the buffer by taking the
1390 * @ubi->buf_mutex.
1391 */
1392 mutex_lock(&ubi->buf_mutex);
1393 dbg_wl("read %d bytes of data", aldata_size);
1394 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1395 if (err && err != UBI_IO_BITFLIPS) {
1396 ubi_warn(ubi, "error %d while reading data from PEB %d",
1397 err, from);
1398 err = MOVE_SOURCE_RD_ERR;
1399 goto out_unlock_buf;
1400 }
1401
1402 /*
1403 * Now we have got to calculate how much data we have to copy. In
1404 * case of a static volume it is fairly easy - the VID header contains
1405 * the data size. In case of a dynamic volume it is more difficult - we
1406 * have to read the contents, cut 0xFF bytes from the end and copy only
1407 * the first part. We must do this to avoid writing 0xFF bytes as it
1408 * may have some side-effects. And not only this. It is important not
1409 * to include those 0xFFs in the CRC because later they may be filled
1410 * by data.
1411 */
1412 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1413 aldata_size = data_size =
1414 ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1415
1416 cond_resched();
1417 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1418 cond_resched();
1419
1420 /*
1421 * It may turn out to be that the whole @from physical eraseblock
1422 * contains only 0xFF bytes. Then we have to only write the VID header
1423 * and do not write any data. This also means we should not set
1424 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1425 */
1426 if (data_size > 0) {
1427 vid_hdr->copy_flag = 1;
1428 vid_hdr->data_size = cpu_to_be32(data_size);
1429 vid_hdr->data_crc = cpu_to_be32(crc);
1430 }
1431 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1432
1433 err = ubi_io_write_vid_hdr(ubi, to, vidb);
1434 if (err) {
1435 if (err == -EIO)
1436 err = MOVE_TARGET_WR_ERR;
1437 goto out_unlock_buf;
1438 }
1439
1440 cond_resched();
1441
1442 /* Read the VID header back and check if it was written correctly */
1443 err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
1444 if (err) {
1445 if (err != UBI_IO_BITFLIPS) {
1446 ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1447 err, to);
1448 if (is_error_sane(err))
1449 err = MOVE_TARGET_RD_ERR;
1450 } else
1451 err = MOVE_TARGET_BITFLIPS;
1452 goto out_unlock_buf;
1453 }
1454
1455 if (data_size > 0) {
1456 err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1457 if (err) {
1458 if (err == -EIO)
1459 err = MOVE_TARGET_WR_ERR;
1460 goto out_unlock_buf;
1461 }
1462
1463 cond_resched();
1464 }
1465
1466 ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
1467 vol->eba_tbl->entries[lnum].pnum = to;
1468
1469 out_unlock_buf:
1470 mutex_unlock(&ubi->buf_mutex);
1471 out_unlock_leb:
1472 leb_write_unlock(ubi, vol_id, lnum);
1473 return err;
1474 }
1475
1476 /**
1477 * print_rsvd_warning - warn about not having enough reserved PEBs.
1478 * @ubi: UBI device description object
 * @ai: attaching information
1479 *
1480 * This is a helper function for 'ubi_eba_init()' which is called when UBI
1481 * cannot reserve enough PEBs for bad block handling. This function makes a
1482 * decision whether we have to print a warning or not. The algorithm is as
1483 * follows:
1484 * o if this is a new UBI image, then just print the warning
1485 * o if this is an UBI image which has already been used for some time, print
1486 * a warning only if we can reserve less than 10% of the expected amount of
1487 * the reserved PEBs.
1488 *
1489 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1490 * of PEBs becomes smaller, which is normal and we do not want to scare users
1491 * with a warning every time they attach the MTD device. This was an issue
1492 * reported by real users.
1493 */
1494 static void print_rsvd_warning(struct ubi_device *ubi,
1495 struct ubi_attach_info *ai)
1496 {
1497 /*
1498 * The 1 << 18 (262144) threshold is picked arbitrarily, just a reasonably
1499 * large number to distinguish between newly flashed and used images.
1500 */
1501 if (ai->max_sqnum > (1 << 18)) {
1502 int min = ubi->beb_rsvd_level / 10;
1503
1504 if (!min)
1505 min = 1;
1506 if (ubi->beb_rsvd_pebs > min)
1507 return;
1508 }
1509
1510 ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1511 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1512 if (ubi->corr_peb_count)
1513 ubi_warn(ubi, "%d PEBs are corrupted and not used",
1514 ubi->corr_peb_count);
1515 }
1516
1517 /**
1518 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1519 * @ubi: UBI device description object
1520 * @ai_fastmap: UBI attach info object created by fastmap
1521 * @ai_scan: UBI attach info object created by scanning
1522 *
1523 * Returns < 0 in case of an internal error, 0 otherwise.
1524 * If a bad EBA table entry was found it will be printed out and
1525 * ubi_assert() triggers.
1526 */
1527 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1528 struct ubi_attach_info *ai_scan)
1529 {
1530 int i, j, num_volumes, ret = 0;
1531 int **scan_eba, **fm_eba;
1532 struct ubi_ainf_volume *av;
1533 struct ubi_volume *vol;
1534 struct ubi_ainf_peb *aeb;
1535 struct rb_node *rb;
1536
1537 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1538
1539 scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
1540 if (!scan_eba)
1541 return -ENOMEM;
1542
1543 fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
1544 if (!fm_eba) {
1545 kfree(scan_eba);
1546 return -ENOMEM;
1547 }
1548
1549 for (i = 0; i < num_volumes; i++) {
1550 vol = ubi->volumes[i];
1551 if (!vol)
1552 continue;
1553
1554 scan_eba[i] = kmalloc_array(vol->reserved_pebs,
1555 sizeof(**scan_eba),
1556 GFP_KERNEL);
1557 if (!scan_eba[i]) {
1558 ret = -ENOMEM;
1559 goto out_free;
1560 }
1561
1562 fm_eba[i] = kmalloc_array(vol->reserved_pebs,
1563 sizeof(**fm_eba),
1564 GFP_KERNEL);
1565 if (!fm_eba[i]) {
1566 ret = -ENOMEM;
1567 goto out_free;
1568 }
1569
1570 for (j = 0; j < vol->reserved_pebs; j++)
1571 scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1572
1573 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1574 if (!av)
1575 continue;
1576
1577 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1578 scan_eba[i][aeb->lnum] = aeb->pnum;
1579
1580 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1581 if (!av)
1582 continue;
1583
1584 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1585 fm_eba[i][aeb->lnum] = aeb->pnum;
1586
1587 for (j = 0; j < vol->reserved_pebs; j++) {
1588 if (scan_eba[i][j] != fm_eba[i][j]) {
1589 if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1590 fm_eba[i][j] == UBI_LEB_UNMAPPED)
1591 continue;
1592
1593 ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
1594 vol->vol_id, j, fm_eba[i][j],
1595 scan_eba[i][j]);
1596 ubi_assert(0);
1597 }
1598 }
1599 }
1600
1601 out_free:
1602 for (i = 0; i < num_volumes; i++) {
1603 if (!ubi->volumes[i])
1604 continue;
1605
1606 kfree(scan_eba[i]);
1607 kfree(fm_eba[i]);
1608 }
1609
1610 kfree(scan_eba);
1611 kfree(fm_eba);
1612 return ret;
1613 }
1614
1615 /**
1616 * ubi_eba_init - initialize the EBA sub-system using attaching information.
1617 * @ubi: UBI device description object
1618 * @ai: attaching information
1619 *
1620 * This function returns zero in case of success and a negative error code in
1621 * case of failure.
1622 */
1623 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1624 {
1625 int i, err, num_volumes;
1626 struct ubi_ainf_volume *av;
1627 struct ubi_volume *vol;
1628 struct ubi_ainf_peb *aeb;
1629 struct rb_node *rb;
1630
1631 dbg_eba("initialize EBA sub-system");
1632
1633 spin_lock_init(&ubi->ltree_lock);
1634 mutex_init(&ubi->alc_mutex);
1635 ubi->ltree = RB_ROOT;
1636
1637 ubi->global_sqnum = ai->max_sqnum + 1;
1638 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1639
1640 for (i = 0; i < num_volumes; i++) {
1641 struct ubi_eba_table *tbl;
1642
1643 vol = ubi->volumes[i];
1644 if (!vol)
1645 continue;
1646
1647 cond_resched();
1648
1649 tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
1650 if (IS_ERR(tbl)) {
1651 err = PTR_ERR(tbl);
1652 goto out_free;
1653 }
1654
1655 ubi_eba_replace_table(vol, tbl);
1656
1657 av = ubi_find_av(ai, idx2vol_id(ubi, i));
1658 if (!av)
1659 continue;
1660
1661 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1662 if (aeb->lnum >= vol->reserved_pebs) {
1663 /*
1664 * This may happen in case of an unclean reboot
1665 * during re-size.
1666 */
1667 ubi_move_aeb_to_list(av, aeb, &ai->erase);
1668 } else {
1669 struct ubi_eba_entry *entry;
1670
1671 entry = &vol->eba_tbl->entries[aeb->lnum];
1672 entry->pnum = aeb->pnum;
1673 }
1674 }
1675 }
1676
1677 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1678 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1679 ubi->avail_pebs, EBA_RESERVED_PEBS);
1680 if (ubi->corr_peb_count)
1681 ubi_err(ubi, "%d PEBs are corrupted and not used",
1682 ubi->corr_peb_count);
1683 err = -ENOSPC;
1684 goto out_free;
1685 }
1686 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1687 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1688
1689 if (ubi->bad_allowed) {
1690 ubi_calculate_reserved(ubi);
1691
1692 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1693 /* Not enough free physical eraseblocks */
1694 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1695 print_rsvd_warning(ubi, ai);
1696 } else
1697 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1698
1699 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1700 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1701 }
1702
1703 dbg_eba("EBA sub-system is initialized");
1704 return 0;
1705
1706 out_free:
1707 for (i = 0; i < num_volumes; i++) {
1708 if (!ubi->volumes[i])
1709 continue;
1710 ubi_eba_replace_table(ubi->volumes[i], NULL);
1711 }
1712 return err;
1713 }
1714