/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

	ubi_update_fastmap(ubi);
	spin_lock(&ubi->wl_lock);
	ubi->fm_work_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
}
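
/*
 * A usage sketch (not code from this file): the work item is set up once in
 * the WL init path and triggered from atomic context via the
 * fm_work_scheduled flag, which update_fastmap_work_fn() clears again above.
 * The exact INIT_WORK() placement is shown for illustration only:
 *
 *	INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 *	...
 *	spin_lock(&ubi->wl_lock);
 *	if (!ubi->fm_work_scheduled) {
 *		ubi->fm_work_scheduled = 1;
 *		schedule_work(&ubi->fm_work);
 *	}
 *	spin_unlock(&ubi->wl_lock);
 */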

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as the
 * anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

/**
 * return_unused_pool_pebs - return unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}
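
/*
 * For reference, a sketch of the pool object this operates on (paraphrased
 * from struct ubi_fm_pool in ubi.h):
 *
 *	struct ubi_fm_pool {
 *		int pebs[UBI_FM_MAX_POOL_SIZE];
 *		int used;
 *		int size;
 *		int max_size;
 *	};
 *
 * Slots [0, used) have already been handed out, slots [used, size) are
 * still unused, and max_size is the capacity. return_unused_pool_pebs()
 * therefore walks [used, size) and gives those PEBs back to the free tree.
 */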

/**
 * anchor_pebs_available - check whether the free tree contains an anchor PEB
 * candidate, i.e. a PEB with a number below UBI_FM_MAX_START.
 * @root: the RB-tree of free wear-leveling entries to scan
 */
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the returned PEB is suitable as the fastmap anchor,
 * i.e. its number is below UBI_FM_MAX_START
 *
 * The function returns a free physical eraseblock and removes it from the
 * wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* Remove it from the free list; the wl subsystem no longer knows
	 * this eraseblock. */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
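
/*
 * Caller sketch (simplified; the real call sites are in the fastmap code).
 * Per the rule above, wl_lock must be held around the call:
 *
 *	spin_lock(&ubi->wl_lock);
 *	e = ubi_wl_get_fm_peb(ubi, 1);
 *	spin_unlock(&ubi->wl_lock);
 *
 * With @anchor set, the returned PEB number is below UBI_FM_MAX_START, so
 * the attach code can find the fastmap by scanning only the first PEBs.
 */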

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_wl_entry *e;
	int enough;

	spin_lock(&ubi->wl_lock);

	return_unused_pool_pebs(ubi, wl_pool);
	return_unused_pool_pebs(ubi, pool);

	wl_pool->size = 0;
	pool->size = 0;

	for (;;) {
		enough = 0;
		if (pool->size < pool->max_size) {
			if (!ubi->free.rb_node)
				break;

			e = wl_get_wle(ubi);
			if (!e)
				break;

			pool->pebs[pool->size] = e->pnum;
			pool->size++;
		} else
			enough++;

		if (wl_pool->size < wl_pool->max_size) {
			if (!ubi->free.rb_node ||
			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
				break;

			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
			self_check_in_wl_tree(ubi, e, &ubi->free);
			rb_erase(&e->u.rb, &ubi->free);
			ubi->free_count--;

			wl_pool->pebs[wl_pool->size] = e->pnum;
			wl_pool->size++;
		} else
			enough++;

		if (enough == 2)
			break;
	}

	wl_pool->used = 0;
	pool->used = 0;

	spin_unlock(&ubi->wl_lock);
}
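
/*
 * Post-conditions, assuming enough free PEBs were available (illustrative
 * assertions, not code from this file):
 *
 *	ubi_assert(pool->used == 0 && pool->size == pool->max_size);
 *	ubi_assert(wl_pool->used == 0 && wl_pool->size == wl_pool->max_size);
 *
 * If the free tree runs dry, or fewer than 5 PEBs above the bad-block
 * reserve remain for the WL pool, the loop breaks early and the pools
 * simply start out partially filled (size below max_size).
 */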

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node && ubi->works_count) {
		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock number in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret, retried = 0;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
	down_read(&ubi->fm_eba_sem);
	spin_lock(&ubi->wl_lock);

	/* We check here also for the WL pool because at this point we can
	 * still refill the WL pool synchronously. */
	if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_eba_sem);
		ret = ubi_update_fastmap(ubi);
		if (ret) {
			ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
			down_read(&ubi->fm_eba_sem);
			return -ENOSPC;
		}
		down_read(&ubi->fm_eba_sem);
		spin_lock(&ubi->wl_lock);
	}

	if (pool->used == pool->size) {
		spin_unlock(&ubi->wl_lock);
		if (retried) {
			ubi_err(ubi, "Unable to get a free PEB from user WL pool");
			ret = -ENOSPC;
			goto out;
		}
		retried = 1;
		up_read(&ubi->fm_eba_sem);
		ret = produce_free_peb(ubi);
		if (ret < 0) {
			down_read(&ubi->fm_eba_sem);
			goto out;
		}
		goto again;
	}

	ubi_assert(pool->used < pool->size);
	ret = pool->pebs[pool->used++];
	prot_queue_add(ubi, ubi->lookuptbl[ret]);
	spin_unlock(&ubi->wl_lock);
out:
	return ret;
}
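
/*
 * Caller-side sketch of the contract stated above: the function returns
 * with fm_eba_sem read-locked on both the success and the error path, so
 * the caller has to drop it (roughly what the EBA code does; simplified):
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */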

/**
 * get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

	if (pool->used == pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		if (!ubi->fm_work_scheduled) {
			ubi->fm_work_scheduled = 1;
			schedule_work(&ubi->fm_work);
		}
		return NULL;
	}

	pnum = pool->pebs[pool->used++];
	return ubi->lookuptbl[pnum];
}
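
/*
 * Caller sketch (simplified): the WL worker already holds wl_lock, which is
 * why no fastmap update can happen here. A NULL return means the pool is
 * empty and being refilled asynchronously, so the caller backs off:
 *
 *	e2 = get_peb_for_wl(ubi);
 *	if (!e2)
 *		goto out_cancel;
 */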

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 *
 * see: ubi_wl_put_peb()
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl system
	 * has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
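
/*
 * Usage sketch (simplified): after a new fastmap has been committed, the
 * PEBs of the old one are handed back through this function, e.g.
 *
 *	for (i = 0; i < old_fm->used_blocks; i++)
 *		ubi_wl_put_fm_peb(ubi, old_fm->e[i], i, 0);
 *
 * Block 0 carries the fastmap super block, hence the lnum-based vol_id
 * selection above (lnum == 0 maps to UBI_FM_SB_VOLUME_ID).
 */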

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}

/**
 * ubi_fastmap_close - release fastmap resources when the WL sub-system
 * shuts down.
 * @ubi: UBI device description object
 */
static void ubi_fastmap_close(struct ubi_device *ubi)
{
	int i;

	return_unused_pool_pebs(ubi, &ubi->fm_pool);
	return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

	if (ubi->fm) {
		for (i = 0; i < ubi->fm->used_blocks; i++)
			kfree(ubi->fm->e[i]);
	}
	kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate to hand out
 * @root: RB tree to test against
 *
 * See find_mean_wl_entry()
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
					       struct ubi_wl_entry *e,
					       struct rb_root *root)
{
	/* If the candidate is an anchor PEB (pnum < UBI_FM_MAX_START) and
	 * fastmap is enabled but not attached yet, keep it back for fastmap
	 * and hand out the in-order successor of the tree root instead. */
	if (e && !ubi->fm_disabled && !ubi->fm &&
	    e->pnum < UBI_FM_MAX_START)
		e = rb_entry(rb_next(root->rb_node),
			     struct ubi_wl_entry, u.rb);

	return e;
}