/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

#include <linux/crc32.h>
#include <linux/bitmap.h>
#include "ubi.h"

/**
 * init_seen - allocate memory for the seen logic, used for debugging.
 * @ubi: UBI device description object
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = kcalloc(BITS_TO_LONGS(ubi->peb_count), sizeof(unsigned long),
		      GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}

/**
 * free_seen - free the seen logic integer array.
 * @seen: integer array of @ubi->peb_count size
 */
static inline void free_seen(unsigned long *seen)
{
	kfree(seen);
}

/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: The PEB to be marked as seen
 * @seen: integer array of @ubi->peb_count size
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}

/**
 * self_check_seen - check whether all PEBs have been seen by fastmap.
 * @ubi: UBI device description object
 * @seen: integer array of @ubi->peb_count size
 */
static int self_check_seen(struct ubi_device *ubi, unsigned long *seen)
{
	int pnum, ret = 0;

	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return 0;

	for (pnum = 0; pnum < ubi->peb_count; pnum++) {
		if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
			ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
	size_t size;

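	/*
	 * A fastmap consists of a super block, a header, two pool
	 * descriptors (the regular and the WL pool), one EC entry per
	 * PEB, an EBA translation table covering all PEBs and one
	 * volume header per possible volume:
	 *
	 *   ubi_fm_sb | ubi_fm_hdr | 2 * ubi_fm_scan_pool |
	 *   peb_count * ubi_fm_ec | ubi_fm_eba + peb_count * __be32 |
	 *   UBI_MAX_VOLUMES * ubi_fm_volhdr
	 *
	 * The sum is rounded up to full LEBs since a fastmap always
	 * occupies complete LEBs.
	 */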
	size = sizeof(struct ubi_fm_sb) +
	       sizeof(struct ubi_fm_hdr) +
	       sizeof(struct ubi_fm_scan_pool) +
	       sizeof(struct ubi_fm_scan_pool) +
	       (ubi->peb_count * sizeof(struct ubi_fm_ec)) +
	       (sizeof(struct ubi_fm_eba) +
	       (ubi->peb_count * sizeof(__be32))) +
	       sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
	return roundup(size, ubi->leb_size);
}

/**
 * new_fm_vbuf - allocate a new volume header for fastmap usage.
 * @ubi: UBI device description object
 * @vol_id: the VID of the new header
 *
 * Returns a new struct ubi_vid_io_buf on success.
 * NULL indicates out of memory.
 */
static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{
	struct ubi_vid_io_buf *new;
	struct ubi_vid_hdr *vh;

	new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!new)
		goto out;

	vh = ubi_get_vid_hdr(new);
	vh->vol_type = UBI_VID_DYNAMIC;
	vh->vol_id = cpu_to_be32(vol_id);

	/* UBI implementations without fastmap support have to delete the
	 * fastmap.
	 */
	vh->compat = UBI_COMPAT_DELETE;

out:
	return new;
}

/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
		   int pnum, int ec, int scrub)
{
	struct ubi_ainf_peb *aeb;

	aeb = ubi_alloc_aeb(ai, pnum, ec);
	if (!aeb)
		return -ENOMEM;

	aeb->lnum = -1;
	aeb->scrub = scrub;
	aeb->copy_flag = aeb->sqnum = 0;

	ai->ec_sum += aeb->ec;
	ai->ec_count++;

	if (ai->max_ec < aeb->ec)
		ai->max_ec = aeb->ec;

	if (ai->min_ec > aeb->ec)
		ai->min_ec = aeb->ec;

	list_add_tail(&aeb->u.list, list);

	return 0;
}

/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBs
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success,
 * an ERR_PTR on error.
 */
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
				       int used_ebs, int data_pad, u8 vol_type,
				       int last_eb_bytes)
{
	struct ubi_ainf_volume *av;

	av = ubi_add_av(ai, vol_id);
	if (IS_ERR(av))
		return av;

	av->data_pad = data_pad;
	av->last_data_size = last_eb_bytes;
	av->compat = 0;
	av->vol_type = vol_type;
	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = used_ebs;

	dbg_bld("found volume (ID %i)", vol_id);
	return av;
}

/**
 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 * from its original list.
 * @ai: ubi_attach_info object
 * @aeb: the SEB to be assigned
 * @av: target scan volume
 */
static void assign_aeb_to_av(struct ubi_attach_info *ai,
			     struct ubi_ainf_peb *aeb,
			     struct ubi_ainf_volume *av)
{
	struct ubi_ainf_peb *tmp_aeb;
	struct rb_node **p = &av->root.rb_node, *parent = NULL;

	while (*p) {
		parent = *p;

		tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (aeb->lnum != tmp_aeb->lnum) {
			if (aeb->lnum < tmp_aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		} else
			break;
	}

	list_del(&aeb->u.list);
	av->leb_count++;

	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
}

/**
 * update_vol - inserts or updates a LEB which was found in a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
		      struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
		      struct ubi_ainf_peb *new_aeb)
{
	struct rb_node **p = &av->root.rb_node, *parent = NULL;
	struct ubi_ainf_peb *aeb, *victim;
	int cmp_res;

	while (*p) {
		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);

		if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
			if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;

			continue;
		}

		/* This case can happen if the fastmap gets written
		 * because of a volume change (creation, deletion, ..).
		 * Then a PEB can be within the persistent EBA and the pool.
		 */
		if (aeb->pnum == new_aeb->pnum) {
			ubi_assert(aeb->lnum == new_aeb->lnum);
			ubi_free_aeb(ai, new_aeb);

			return 0;
		}

		cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
		if (cmp_res < 0)
			return cmp_res;

		/* new_aeb is newer */
		if (cmp_res & 1) {
			victim = ubi_alloc_aeb(ai, aeb->pnum, aeb->ec);
			if (!victim)
				return -ENOMEM;

			list_add_tail(&victim->u.list, &ai->erase);

			if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
				av->last_data_size =
					be32_to_cpu(new_vh->data_size);

			dbg_bld("vol %i: AEB %i's PEB %i is the newer",
				av->vol_id, aeb->lnum, new_aeb->pnum);

			aeb->ec = new_aeb->ec;
			aeb->pnum = new_aeb->pnum;
			aeb->copy_flag = new_vh->copy_flag;
			aeb->scrub = new_aeb->scrub;
			aeb->sqnum = new_aeb->sqnum;
			ubi_free_aeb(ai, new_aeb);

		/* new_aeb is older */
		} else {
			dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
				av->vol_id, aeb->lnum, new_aeb->pnum);
			list_add_tail(&new_aeb->u.list, &ai->erase);
		}

		return 0;
	}
	/* This LEB is new, let's add it to the volume */

	if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
		av->highest_lnum = be32_to_cpu(new_vh->lnum);
		av->last_data_size = be32_to_cpu(new_vh->data_size);
	}

	if (av->vol_type == UBI_STATIC_VOLUME)
		av->used_ebs = be32_to_cpu(new_vh->used_ebs);

	av->leb_count++;

	rb_link_node(&new_aeb->u.rb, parent, p);
	rb_insert_color(&new_aeb->u.rb, &av->root);

	return 0;
}

/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
			    struct ubi_vid_hdr *new_vh,
			    struct ubi_ainf_peb *new_aeb)
{
	int vol_id = be32_to_cpu(new_vh->vol_id);
	struct ubi_ainf_volume *av;

	if (vol_id == UBI_FM_SB_VOLUME_ID || vol_id == UBI_FM_DATA_VOLUME_ID) {
		ubi_free_aeb(ai, new_aeb);

		return 0;
	}

	/* Find the volume this SEB belongs to */
	av = ubi_find_av(ai, vol_id);
	if (!av) {
		ubi_err(ubi, "orphaned volume in fastmap pool!");
		ubi_free_aeb(ai, new_aeb);
		return UBI_BAD_FASTMAP;
	}

	ubi_assert(vol_id == av->vol_id);

	return update_vol(ubi, ai, av, new_vh, new_aeb);
}

/**
 * unmap_peb - unmap a PEB.
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 *
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 */
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
	struct ubi_ainf_volume *av;
	struct rb_node *node, *node2;
	struct ubi_ainf_peb *aeb;

	ubi_rb_for_each_entry(node, av, &ai->volumes, rb) {
		ubi_rb_for_each_entry(node2, aeb, &av->root, u.rb) {
			if (aeb->pnum == pnum) {
				rb_erase(&aeb->u.rb, &av->root);
				av->leb_count--;
				ubi_free_aeb(ai, aeb);
				return;
			}
		}
	}
}

/**
 * scan_pool - scans a pool for changed (no longer empty) PEBs.
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     __be32 *pebs, int pool_size, unsigned long long *max_sqnum,
		     struct list_head *free)
{
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_ainf_peb *new_aeb;
	int i, pnum, err, ret = 0;

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech)
		return -ENOMEM;

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		kfree(ech);
		return -ENOMEM;
	}

	vh = ubi_get_vid_hdr(vb);

	dbg_bld("scanning fastmap pool: size = %i", pool_size);

	/*
	 * Now scan all PEBs in the pool to find changes which have been made
	 * after the creation of the fastmap
	 */
	for (i = 0; i < pool_size; i++) {
		int scrub = 0;
		int image_seq;

		pnum = be32_to_cpu(pebs[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ubi_err(ubi, "bad PEB in fastmap pool!");
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (err && err != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
				pnum, err);
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		} else if (err == UBI_IO_BITFLIPS)
			scrub = 1;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		image_seq = be32_to_cpu(ech->image_seq);

		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto out;
		}

		err = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
			unsigned long long ec = be64_to_cpu(ech->ec);
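			/*
			 * The PEB is empty although the fastmap lists it as
			 * used: it was unmapped after the fastmap was
			 * written. Drop the stale EBA reference and hand
			 * the PEB over to the free list.
			 */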
			unmap_peb(ai, pnum);
			dbg_bld("Adding PEB to free: %i", pnum);

			if (err == UBI_IO_FF_BITFLIPS)
				scrub = 1;

			ret = add_aeb(ai, free, pnum, ec, scrub);
			if (ret)
				goto out;
			continue;
		} else if (err == 0 || err == UBI_IO_BITFLIPS) {
			dbg_bld("Found non empty PEB:%i in pool", pnum);

			if (err == UBI_IO_BITFLIPS)
				scrub = 1;

			new_aeb = ubi_alloc_aeb(ai, pnum, be64_to_cpu(ech->ec));
			if (!new_aeb) {
				ret = -ENOMEM;
				goto out;
			}

			new_aeb->lnum = be32_to_cpu(vh->lnum);
			new_aeb->sqnum = be64_to_cpu(vh->sqnum);
			new_aeb->copy_flag = vh->copy_flag;
			new_aeb->scrub = scrub;

			if (*max_sqnum < new_aeb->sqnum)
				*max_sqnum = new_aeb->sqnum;

			err = process_pool_aeb(ubi, ai, vh, new_aeb);
			if (err) {
				ret = err > 0 ? UBI_BAD_FASTMAP : err;
				goto out;
			}
		} else {
			/* We are paranoid and fall back to scanning mode */
			ubi_err(ubi, "fastmap pool contains damaged PEBs!");
			ret = err > 0 ? UBI_BAD_FASTMAP : err;
			goto out;
		}

	}

out:
	ubi_free_vid_buf(vb);
	kfree(ech);
	return ret;
}

/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
	struct ubi_ainf_peb *aeb;
	struct ubi_ainf_volume *av;
	struct rb_node *rb1, *rb2;
	int n = 0;

	list_for_each_entry(aeb, &ai->erase, u.list)
		n++;

	list_for_each_entry(aeb, &ai->free, u.list)
		n++;

	ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
		ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
			n++;

	return n;
}

/**
 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info object
 * @fm: the fastmap to be attached
 *
 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
 * < 0 indicates an internal error.
 */
static int ubi_attach_fastmap(struct ubi_device *ubi,
			      struct ubi_attach_info *ai,
			      struct ubi_fastmap_layout *fm)
{
	struct list_head used, free;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmhdr;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fmec;
	struct ubi_fm_volhdr *fmvhdr;
	struct ubi_fm_eba *fm_eba;
	int ret, i, j, pool_size, wl_pool_size;
	size_t fm_pos = 0, fm_size = ubi->fm_size;
	unsigned long long max_sqnum = 0;
	void *fm_raw = ubi->fm_buf;

	INIT_LIST_HEAD(&used);
	INIT_LIST_HEAD(&free);
	ai->min_ec = UBI_MAX_ERASECOUNTER;

	fmsb = (struct ubi_fm_sb *)(fm_raw);
	ai->max_sqnum = fmsb->sqnum;
	fm_pos += sizeof(struct ubi_fm_sb);
	if (fm_pos >= fm_size)
		goto fail_bad;

	fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmhdr);
	if (fm_pos >= fm_size)
		goto fail_bad;

	if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
		ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
		goto fail_bad;
	}

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	if (fm_pos >= fm_size)
		goto fail_bad;
	if (be32_to_cpu(fmpl_wl->magic) != UBI_FM_POOL_MAGIC) {
		ubi_err(ubi, "bad fastmap WL pool magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmpl_wl->magic), UBI_FM_POOL_MAGIC);
		goto fail_bad;
	}

	pool_size = be16_to_cpu(fmpl->size);
	wl_pool_size = be16_to_cpu(fmpl_wl->size);
	fm->max_pool_size = be16_to_cpu(fmpl->max_size);
	fm->max_wl_pool_size = be16_to_cpu(fmpl_wl->max_size);

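	/*
	 * Both pool sizes and both maximal pool sizes come straight from
	 * flash, so they must be validated against the compiled-in limit
	 * before they are used.
	 */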
	if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
		ubi_err(ubi, "bad pool size: %i", pool_size);
		goto fail_bad;
	}

	if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
		ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
		goto fail_bad;
	}

	if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_pool_size < 0) {
		ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
		goto fail_bad;
	}

	if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
	    fm->max_wl_pool_size < 0) {
		ubi_err(ubi, "bad maximal WL pool size: %i",
			fm->max_wl_pool_size);
		goto fail_bad;
	}

	/* read EC values from free list */
	for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from used list */
	for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 0);
		if (ret)
			goto fail;
	}

	/* read EC values from scrub list */
	for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	/* read EC values from erase list */
	for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
		fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmec);
		if (fm_pos >= fm_size)
			goto fail_bad;

		ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
			      be32_to_cpu(fmec->ec), 1);
		if (ret)
			goto fail;
	}

	ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
	ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);

	/* Iterate over all volumes and read their EBA table */
	for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
		fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fmvhdr);
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
			ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
			goto fail_bad;
		}

		av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
			     be32_to_cpu(fmvhdr->used_ebs),
			     be32_to_cpu(fmvhdr->data_pad),
			     fmvhdr->vol_type,
			     be32_to_cpu(fmvhdr->last_eb_bytes));

		if (IS_ERR(av)) {
			if (PTR_ERR(av) == -EEXIST)
				ubi_err(ubi, "volume (ID %i) already exists",
					be32_to_cpu(fmvhdr->vol_id));

			goto fail_bad;
		}

		ai->vols_found++;
		if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
			ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);

		fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fm_eba);
		fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
		if (fm_pos >= fm_size)
			goto fail_bad;

		if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
			ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
				be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
			goto fail_bad;
		}

		for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
			int pnum = be32_to_cpu(fm_eba->pnum[j]);

			if (pnum < 0)
				continue;

			aeb = NULL;
			list_for_each_entry(tmp_aeb, &used, u.list) {
				if (tmp_aeb->pnum == pnum) {
					aeb = tmp_aeb;
					break;
				}
			}

			if (!aeb) {
				ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
				goto fail_bad;
			}

			aeb->lnum = j;

			if (av->highest_lnum <= aeb->lnum)
				av->highest_lnum = aeb->lnum;

			assign_aeb_to_av(ai, aeb, av);

			dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
				aeb->pnum, aeb->lnum, av->vol_id);
		}
	}

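	/*
	 * The pools may contain PEBs which were modified after the
	 * fastmap was written; scan both of them to pick up these
	 * changes.
	 */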
	ret = scan_pool(ubi, ai, fmpl->pebs, pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	ret = scan_pool(ubi, ai, fmpl_wl->pebs, wl_pool_size, &max_sqnum, &free);
	if (ret)
		goto fail;

	if (max_sqnum > ai->max_sqnum)
		ai->max_sqnum = max_sqnum;

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->free);

	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
		list_move_tail(&tmp_aeb->u.list, &ai->erase);

	ubi_assert(list_empty(&free));

	/*
	 * If fastmap is leaking PEBs (must not happen), raise a
	 * fat warning and fall back to scanning mode.
	 * We do this here because in ubi_wl_init() it's too late
	 * and we cannot fall back to scanning.
	 */
	if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
		    ai->bad_peb_count - fm->used_blocks))
		goto fail_bad;

	return 0;

fail_bad:
	ret = UBI_BAD_FASTMAP;
fail:
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}
	list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
		list_del(&tmp_aeb->u.list);
		ubi_free_aeb(ai, tmp_aeb);
	}

	return ret;
}

/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor).
 * @ai: UBI attach info to be filled
 *
 * Returns the PEB number of the anchor, or < 0 if none was found.
 */
static int find_fm_anchor(struct ubi_attach_info *ai)
{
	int ret = -1;
	struct ubi_ainf_peb *aeb;
	unsigned long long max_sqnum = 0;

	list_for_each_entry(aeb, &ai->fastmap, u.list) {
		if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
			max_sqnum = aeb->sqnum;
			ret = aeb->pnum;
		}
	}

	return ret;
}

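/**
 * clone_aeb - duplicate an attach erase block into another attach info.
 * @ai: the ubi_attach_info which will own the copy
 * @old: the attach erase block to be cloned
 *
 * Returns the newly allocated copy on success, NULL if out of memory.
 */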
static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai,
				      struct ubi_ainf_peb *old)
{
	struct ubi_ainf_peb *new;

	new = ubi_alloc_aeb(ai, old->pnum, old->ec);
	if (!new)
		return NULL;

	new->vol_id = old->vol_id;
	new->sqnum = old->sqnum;
	new->lnum = old->lnum;
	new->scrub = old->scrub;
	new->copy_flag = old->copy_flag;

	return new;
}

/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
		     struct ubi_attach_info *scan_ai)
{
	struct ubi_fm_sb *fmsb, *fmsb2;
	struct ubi_vid_io_buf *vb;
	struct ubi_vid_hdr *vh;
	struct ubi_ec_hdr *ech;
	struct ubi_fastmap_layout *fm;
	struct ubi_ainf_peb *aeb;
	int i, used_blocks, pnum, fm_anchor, ret = 0;
	size_t fm_size;
	__be32 crc, tmp_crc;
	unsigned long long sqnum = 0;

	fm_anchor = find_fm_anchor(scan_ai);
	if (fm_anchor < 0)
		return UBI_NO_FASTMAP;

	/* Copy all (possible) fastmap blocks into our new attach structure. */
	list_for_each_entry(aeb, &scan_ai->fastmap, u.list) {
		struct ubi_ainf_peb *new;

		new = clone_aeb(ai, aeb);
		if (!new)
			return -ENOMEM;

		list_add(&new->u.list, &ai->fastmap);
	}

	down_write(&ubi->fm_protect);
	memset(ubi->fm_buf, 0, ubi->fm_size);

	fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
	if (!fmsb) {
		ret = -ENOMEM;
		goto out;
	}

	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm) {
		ret = -ENOMEM;
		kfree(fmsb);
		goto out;
	}

	ret = ubi_io_read_data(ubi, fmsb, fm_anchor, 0, sizeof(*fmsb));
	if (ret && ret != UBI_IO_BITFLIPS)
		goto free_fm_sb;
	else if (ret == UBI_IO_BITFLIPS)
		fm->to_be_tortured[0] = 1;

	if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
		ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
			be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	if (fmsb->version != UBI_FM_FMT_VERSION) {
		ubi_err(ubi, "bad fastmap version: %i, expected: %i",
			fmsb->version, UBI_FM_FMT_VERSION);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	used_blocks = be32_to_cpu(fmsb->used_blocks);
	if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
		ubi_err(ubi, "number of fastmap blocks is invalid: %i",
			used_blocks);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	fm_size = ubi->leb_size * used_blocks;
	if (fm_size != ubi->fm_size) {
		ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
			fm_size, ubi->fm_size);
		ret = UBI_BAD_FASTMAP;
		goto free_fm_sb;
	}

	ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ech) {
		ret = -ENOMEM;
		goto free_fm_sb;
	}

	vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vb) {
		ret = -ENOMEM;
		goto free_hdr;
	}

	vh = ubi_get_vid_hdr(vb);

	for (i = 0; i < used_blocks; i++) {
		int image_seq;

		pnum = be32_to_cpu(fmsb->block_loc[i]);

		if (ubi_io_is_bad(ubi, pnum)) {
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		if (i == 0 && pnum != fm_anchor) {
			ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
				pnum, fm_anchor);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
				i, pnum);
			if (ret > 0)
				ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		} else if (ret == UBI_IO_BITFLIPS)
			fm->to_be_tortured[i] = 1;

		image_seq = be32_to_cpu(ech->image_seq);
		if (!ubi->image_seq)
			ubi->image_seq = image_seq;

		/*
		 * Older UBI implementations have image_seq set to zero, so
		 * we shouldn't fail if image_seq == 0.
		 */
		if (image_seq && (image_seq != ubi->image_seq)) {
			ubi_err(ubi, "wrong image seq:%d instead of %d",
				be32_to_cpu(ech->image_seq), ubi->image_seq);
			ret = UBI_BAD_FASTMAP;
			goto free_hdr;
		}

		ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
				i, pnum);
			goto free_hdr;
		}

		if (i == 0) {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_SB_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		} else {
			if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
				ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
					be32_to_cpu(vh->vol_id),
					UBI_FM_DATA_VOLUME_ID);
				ret = UBI_BAD_FASTMAP;
				goto free_hdr;
			}
		}

		if (sqnum < be64_to_cpu(vh->sqnum))
			sqnum = be64_to_cpu(vh->sqnum);

		ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
				       pnum, 0, ubi->leb_size);
		if (ret && ret != UBI_IO_BITFLIPS) {
			ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, err: %i)",
				i, pnum, ret);
			goto free_hdr;
		}
	}

	kfree(fmsb);
	fmsb = NULL;

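	/*
	 * The CRC in the super block covers the whole fastmap with the
	 * data_crc field zeroed, so clear it before recalculating.
	 */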
	fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
	tmp_crc = be32_to_cpu(fmsb2->data_crc);
	fmsb2->data_crc = 0;
	crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
	if (crc != tmp_crc) {
		ubi_err(ubi, "fastmap data CRC is invalid");
		ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
			tmp_crc, crc);
		ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	fmsb2->sqnum = sqnum;

	fm->used_blocks = used_blocks;

	ret = ubi_attach_fastmap(ubi, ai, fm);
	if (ret) {
		if (ret > 0)
			ret = UBI_BAD_FASTMAP;
		goto free_hdr;
	}

	for (i = 0; i < used_blocks; i++) {
		struct ubi_wl_entry *e;

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e) {
			while (i--)
				kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);

			ret = -ENOMEM;
			goto free_hdr;
		}

		e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
		e->ec = be32_to_cpu(fmsb2->block_ec[i]);
		fm->e[i] = e;
	}

	ubi->fm = fm;
	ubi->fm_pool.max_size = ubi->fm->max_pool_size;
	ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
	ubi_msg(ubi, "attached by fastmap");
	ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
	ubi_msg(ubi, "fastmap WL pool size: %d",
		ubi->fm_wl_pool.max_size);
	ubi->fm_disabled = 0;
	ubi->fast_attach = 1;

	ubi_free_vid_buf(vb);
	kfree(ech);
out:
	up_write(&ubi->fm_protect);
	if (ret == UBI_BAD_FASTMAP)
		ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
	return ret;

free_hdr:
	ubi_free_vid_buf(vb);
	kfree(ech);
free_fm_sb:
	kfree(fmsb);
	kfree(fm);
	goto out;
}

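/**
 * ubi_fastmap_init_checkmap - allocate a volume's LEB check bitmap.
 * @vol: UBI volume description object
 * @leb_count: number of LEBs in the volume
 *
 * The bitmap is only needed when the device was attached by fastmap;
 * judging by its name it marks LEBs which still have to be checked.
 *
 * Returns 0 on success, -ENOMEM if out of memory.
 */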
int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count)
{
	struct ubi_device *ubi = vol->ubi;

	if (!ubi->fast_attach)
		return 0;

	vol->checkmap = kcalloc(BITS_TO_LONGS(leb_count), sizeof(unsigned long),
				GFP_KERNEL);
	if (!vol->checkmap)
		return -ENOMEM;

	return 0;
}

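/**
 * ubi_fastmap_destroy_checkmap - free a volume's LEB check bitmap.
 * @vol: UBI volume description object
 */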
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol)
{
	kfree(vol->checkmap);
}

/**
 * ubi_write_fastmap - writes a fastmap.
 * @ubi: UBI device object
 * @new_fm: the fastmap to be written
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int ubi_write_fastmap(struct ubi_device *ubi,
			     struct ubi_fastmap_layout *new_fm)
{
	size_t fm_pos = 0;
	void *fm_raw;
	struct ubi_fm_sb *fmsb;
	struct ubi_fm_hdr *fmh;
	struct ubi_fm_scan_pool *fmpl, *fmpl_wl;
	struct ubi_fm_ec *fec;
	struct ubi_fm_volhdr *fvh;
	struct ubi_fm_eba *feba;
	struct ubi_wl_entry *wl_e;
	struct ubi_volume *vol;
	struct ubi_vid_io_buf *avbuf, *dvbuf;
	struct ubi_vid_hdr *avhdr, *dvhdr;
	struct ubi_work *ubi_wrk;
	struct rb_node *tmp_rb;
	int ret, i, j, free_peb_count, used_peb_count, vol_count;
	int scrub_peb_count, erase_peb_count;
	unsigned long *seen_pebs = NULL;

	fm_raw = ubi->fm_buf;
	memset(ubi->fm_buf, 0, ubi->fm_size);

	avbuf = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!avbuf) {
		ret = -ENOMEM;
		goto out;
	}

	dvbuf = new_fm_vbuf(ubi, UBI_FM_DATA_VOLUME_ID);
	if (!dvbuf) {
		ret = -ENOMEM;
		goto out_kfree;
	}

	avhdr = ubi_get_vid_hdr(avbuf);
	dvhdr = ubi_get_vid_hdr(dvbuf);

	seen_pebs = init_seen(ubi);
	if (IS_ERR(seen_pebs)) {
		ret = PTR_ERR(seen_pebs);
		goto out_kfree;
	}

	spin_lock(&ubi->volumes_lock);
	spin_lock(&ubi->wl_lock);

	fmsb = (struct ubi_fm_sb *)fm_raw;
	fm_pos += sizeof(*fmsb);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmh);
	ubi_assert(fm_pos <= ubi->fm_size);

	fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
	fmsb->version = UBI_FM_FMT_VERSION;
	fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
	/* the max sqnum will be filled in while *reading* the fastmap */
	fmsb->sqnum = 0;

	fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
	free_peb_count = 0;
	used_peb_count = 0;
	scrub_peb_count = 0;
	erase_peb_count = 0;
	vol_count = 0;

	fmpl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl);
	fmpl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl->size = cpu_to_be16(ubi->fm_pool.size);
	fmpl->max_size = cpu_to_be16(ubi->fm_pool.max_size);

	for (i = 0; i < ubi->fm_pool.size; i++) {
		fmpl->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
		set_seen(ubi, ubi->fm_pool.pebs[i], seen_pebs);
	}

	fmpl_wl = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
	fm_pos += sizeof(*fmpl_wl);
	fmpl_wl->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
	fmpl_wl->size = cpu_to_be16(ubi->fm_wl_pool.size);
	fmpl_wl->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);

	for (i = 0; i < ubi->fm_wl_pool.size; i++) {
		fmpl_wl->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
		set_seen(ubi, ubi->fm_wl_pool.pebs[i], seen_pebs);
	}

	ubi_for_each_free_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		free_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->free_peb_count = cpu_to_be32(free_peb_count);

	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}

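	/*
	 * PEBs in the protection queue are still in use, account them
	 * as used PEBs as well.
	 */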
	ubi_for_each_protected_peb(ubi, i, wl_e) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		used_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->used_peb_count = cpu_to_be32(used_peb_count);

	ubi_for_each_scrub_peb(ubi, wl_e, tmp_rb) {
		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

		fec->pnum = cpu_to_be32(wl_e->pnum);
		set_seen(ubi, wl_e->pnum, seen_pebs);
		fec->ec = cpu_to_be32(wl_e->ec);

		scrub_peb_count++;
		fm_pos += sizeof(*fec);
		ubi_assert(fm_pos <= ubi->fm_size);
	}
	fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);

	list_for_each_entry(ubi_wrk, &ubi->works, list) {
		if (ubi_is_erase_work(ubi_wrk)) {
			wl_e = ubi_wrk->e;
			ubi_assert(wl_e);

			fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);

			fec->pnum = cpu_to_be32(wl_e->pnum);
			set_seen(ubi, wl_e->pnum, seen_pebs);
			fec->ec = cpu_to_be32(wl_e->ec);

			erase_peb_count++;
			fm_pos += sizeof(*fec);
			ubi_assert(fm_pos <= ubi->fm_size);
		}
	}
	fmh->erase_peb_count = cpu_to_be32(erase_peb_count);

	for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
		vol = ubi->volumes[i];

		if (!vol)
			continue;

		vol_count++;

		fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
		fm_pos += sizeof(*fvh);
		ubi_assert(fm_pos <= ubi->fm_size);

		fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
		fvh->vol_id = cpu_to_be32(vol->vol_id);
		fvh->vol_type = vol->vol_type;
		fvh->used_ebs = cpu_to_be32(vol->used_ebs);
		fvh->data_pad = cpu_to_be32(vol->data_pad);
		fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);

		ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
			   vol->vol_type == UBI_STATIC_VOLUME);

		feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
		fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
		ubi_assert(fm_pos <= ubi->fm_size);

		for (j = 0; j < vol->reserved_pebs; j++) {
			struct ubi_eba_leb_desc ldesc;

			ubi_eba_get_ldesc(vol, j, &ldesc);
			feba->pnum[j] = cpu_to_be32(ldesc.pnum);
		}

		feba->reserved_pebs = cpu_to_be32(j);
		feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
	}
	fmh->vol_count = cpu_to_be32(vol_count);
	fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);

	avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	avhdr->lnum = 0;

	spin_unlock(&ubi->wl_lock);
	spin_unlock(&ubi->volumes_lock);

	dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
	ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avbuf);
	if (ret) {
		ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
		goto out_kfree;
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
		set_seen(ubi, new_fm->e[i]->pnum, seen_pebs);
		fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
	}

	fmsb->data_crc = 0;
	fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
					   ubi->fm_size));

	for (i = 1; i < new_fm->used_blocks; i++) {
		dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		dvhdr->lnum = cpu_to_be32(i);
		dbg_bld("writing fastmap data to PEB %i sqnum %llu",
			new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
		ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf);
		if (ret) {
			ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	for (i = 0; i < new_fm->used_blocks; i++) {
		ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
					new_fm->e[i]->pnum, 0, ubi->leb_size);
		if (ret) {
			ubi_err(ubi, "unable to write fastmap to PEB %i!",
				new_fm->e[i]->pnum);
			goto out_kfree;
		}
	}

	ubi_assert(new_fm);
	ubi->fm = new_fm;

	ret = self_check_seen(ubi, seen_pebs);
	dbg_bld("fastmap written!");

out_kfree:
	ubi_free_vid_buf(avbuf);
	ubi_free_vid_buf(dvbuf);
	free_seen(seen_pebs);
out:
	return ret;
}

/**
 * erase_block - Manually erase a PEB.
 * @ubi: UBI device object
 * @pnum: PEB to be erased
 *
 * Returns the new EC value on success, < 0 indicates an internal error.
 */
static int erase_block(struct ubi_device *ubi, int pnum)
{
	int ret;
	struct ubi_ec_hdr *ec_hdr;
	long long ec;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (ret < 0)
		goto out;
	else if (ret && ret != UBI_IO_BITFLIPS) {
		ret = -EINVAL;
		goto out;
	}

	ret = ubi_io_sync_erase(ubi, pnum, 0);
	if (ret < 0)
		goto out;

	ec = be64_to_cpu(ec_hdr->ec);
	ec += ret;
	if (ec > UBI_MAX_ERASECOUNTER) {
		ret = -EINVAL;
		goto out;
	}

	ec_hdr->ec = cpu_to_be64(ec);
	ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	if (ret < 0)
		goto out;

	ret = ec;
out:
	kfree(ec_hdr);
	return ret;
}

/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_KERNEL);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);

	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}

/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			fm->e[i] = NULL;
		}
	}
}

/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{
	int ret, i, j;
	struct ubi_fastmap_layout *new_fm, *old_fm;
	struct ubi_wl_entry *tmp_e;

	down_write(&ubi->fm_protect);
	down_write(&ubi->work_sem);
	down_write(&ubi->fm_eba_sem);

	ubi_refill_pools(ubi);

	if (ubi->ro_mode || ubi->fm_disabled) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return -ENOMEM;
	}

	new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
	old_fm = ubi->fm;
	ubi->fm = NULL;

	if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
		ubi_err(ubi, "fastmap too large");
		ret = -ENOSPC;
		goto err;
	}

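	/*
	 * Collect a PEB for every fastmap data block; block 0 is the
	 * anchor PEB and is handled separately below.
	 */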
	for (i = 1; i < new_fm->used_blocks; i++) {
		spin_lock(&ubi->wl_lock);
		tmp_e = ubi_wl_get_fm_peb(ubi, 0);
		spin_unlock(&ubi->wl_lock);

		if (!tmp_e) {
			if (old_fm && old_fm->e[i]) {
				ret = erase_block(ubi, old_fm->e[i]->pnum);
				if (ret < 0) {
					ubi_err(ubi, "could not erase old fastmap PEB");

					for (j = 1; j < i; j++) {
						ubi_wl_put_fm_peb(ubi, new_fm->e[j],
								  j, 0);
						new_fm->e[j] = NULL;
					}
					goto err;
				}
				new_fm->e[i] = old_fm->e[i];
				old_fm->e[i] = NULL;
			} else {
				ubi_err(ubi, "could not get any free erase block");

				for (j = 1; j < i; j++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
					new_fm->e[j] = NULL;
				}

				ret = -ENOSPC;
				goto err;
			}
		} else {
			new_fm->e[i] = tmp_e;

			if (old_fm && old_fm->e[i]) {
				ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
						  old_fm->to_be_tortured[i]);
				old_fm->e[i] = NULL;
			}
		}
	}

	/* Old fastmap is larger than the new one */
	if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
		for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
			ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
					  old_fm->to_be_tortured[i]);
			old_fm->e[i] = NULL;
		}
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
		/* no fresh anchor PEB was found, reuse the old one */
		if (!tmp_e) {
			ret = erase_block(ubi, old_fm->e[0]->pnum);
			if (ret < 0) {
				ubi_err(ubi, "could not erase old anchor PEB");

				for (i = 1; i < new_fm->used_blocks; i++) {
					ubi_wl_put_fm_peb(ubi, new_fm->e[i],
							  i, 0);
					new_fm->e[i] = NULL;
				}
				goto err;
			}
			new_fm->e[0] = old_fm->e[0];
			new_fm->e[0]->ec = ret;
			old_fm->e[0] = NULL;
		} else {
			/* we've got a new anchor PEB, return the old one */
			ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
					  old_fm->to_be_tortured[0]);
			new_fm->e[0] = tmp_e;
			old_fm->e[0] = NULL;
		}
	} else {
		if (!tmp_e) {
			ubi_err(ubi, "could not find any anchor PEB");

			for (i = 1; i < new_fm->used_blocks; i++) {
				ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
				new_fm->e[i] = NULL;
			}

			ret = -ENOSPC;
			goto err;
		}
		new_fm->e[0] = tmp_e;
	}

	ret = ubi_write_fastmap(ubi, new_fm);

	if (ret)
		goto err;

out_unlock:
	up_write(&ubi->fm_eba_sem);
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);
	return ret;

err:
	ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);

	ret = invalidate_fastmap(ubi);
	if (ret < 0) {
		ubi_err(ubi, "Unable to invalidate current fastmap!");
		ubi_ro_mode(ubi);
	} else {
		return_fm_pebs(ubi, old_fm);
		return_fm_pebs(ubi, new_fm);
		ret = 0;
	}

	kfree(new_fm);
	goto out_unlock;
}