/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As mentioned above, for the UBI sub-system all physical eraseblocks are
 * either "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree,
 * while used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with help of
 * the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed
 * from the head of the queue on each erase operation (for any eraseblock). So
 * the length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
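
/*
 * The typical life cycle of a PEB through the structures above is thus:
 *
 *   @wl->free -> 'ubi_wl_get_peb()' -> @wl->pq -> (%UBI_PROT_QUEUE_LEN
 *   global erase cycles) -> @wl->used -> 'ubi_wl_put_peb()' or a WL move
 *   -> erasure -> @wl->free
 *
 * with detours into @wl->scrub when bit-flips are detected and into
 * @wl->erroneous when the PEB suffers a read error during a move.
 */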

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
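
/*
 * For example, with the usual Kconfig default of 4096 for
 * %CONFIG_MTD_UBI_WL_THRESHOLD, %WL_FREE_MAX_DIFF is 8192, so a free PEB
 * whose erase counter exceeds the smallest free erase counter by 8192 or
 * more is never handed out as a wear-leveling target.
 */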

/*
 * Maximum number of consecutive background thread failures after which UBI
 * switches to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
        ubi_update_fastmap(ubi);
}

/**
 * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
 * @ubi: UBI device description object
 * @pnum: the PEB to check
 */
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
        int i;

        if (!ubi->fm)
                return 0;

        for (i = 0; i < ubi->fm->used_blocks; i++)
                if (ubi->fm->e[i]->pnum == pnum)
                        return 1;

        return 0;
}
#else
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
        return 0;
}
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->u.rb, parent, p);
        rb_insert_color(&e->u.rb, root);
}
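
/*
 * Example of the resulting ordering: (EC 5, PEB 7) sorts before (EC 5,
 * PEB 9), which sorts before (EC 6, PEB 1). The PEB number only breaks
 * ties between equal erase counters, which keeps every key in the tree
 * unique.
 */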

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing work at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, and it takes the semaphore in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err("work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node) {
                spin_unlock(&ubi->wl_lock);

                dbg_wl("do one work synchronously");
                err = do_work(ubi);

                spin_lock(&ubi->wl_lock);
                if (err)
                        return err;
        }

        return 0;
}
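
/*
 * Note that produce_free_peb() drops @ubi->wl_lock around do_work(),
 * because worker functions may sleep. Callers such as __wl_get_peb()
 * therefore re-check the @ubi->free tree after it returns instead of
 * relying on state observed before the call.
 */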

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int pq_tail = ubi->pq_head - 1;

        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
        list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
        dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
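
/*
 * Example of the circular indexing above: if @ubi->pq_head is 0, new
 * entries land in list %UBI_PROT_QUEUE_LEN - 1, the list farthest from
 * the head, so such an entry is only drained on the
 * %UBI_PROT_QUEUE_LEN-th subsequent call to serve_prot_queue().
 */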

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                                          struct rb_root *root, int diff)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *prev_e = NULL;
        int max;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        max = e->ec + diff;

        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        prev_e = e;
                        e = e1;
                }
        }

        /* If no fastmap has been written and this WL entry can be used
         * as anchor PEB, hold it back and return the second best WL entry
         * such that fastmap can use the anchor PEB later. */
        if (prev_e && !ubi->fm_disabled &&
            !ubi->fm && e->pnum < UBI_FM_MAX_START)
                return prev_e;

        return e;
}
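
/*
 * The descent above is a bounded-predecessor search: whenever it steps
 * right it remembers the candidate it leaves behind, so it ends up on the
 * entry with the largest erase counter that is still below @max (with the
 * runner-up kept in @prev_e for the fastmap anchor case).
 */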

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but no greater than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                                               struct rb_root *root)
{
        struct ubi_wl_entry *e, *first, *last;

        first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

#ifdef CONFIG_MTD_UBI_FASTMAP
                /* If no fastmap has been written and this WL entry can be used
                 * as anchor PEB, hold it back and return the second best
                 * WL entry such that fastmap can use the anchor PEB later. */
                if (e && !ubi->fm_disabled && !ubi->fm &&
                    e->pnum < UBI_FM_MAX_START)
                        e = rb_entry(rb_next(root->rb_node),
                                     struct ubi_wl_entry, u.rb);
#endif
        } else
                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

        return e;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * find_anchor_wl_entry - find wear-leveling entry to be used as the anchor PEB.
 * @root: the RB-tree where to look for
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *victim = NULL;
        int max_ec = UBI_MAX_ERASECOUNTER;

        ubi_rb_for_each_entry(p, e, root, u.rb) {
                if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
                        victim = e;
                        max_ec = e->ec;
                }
        }

        return victim;
}

static int anchor_pebs_available(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e;

        ubi_rb_for_each_entry(p, e, root, u.rb)
                if (e->pnum < UBI_FM_MAX_START)
                        return 1;

        return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
 * @anchor: This PEB will be used as anchor PEB by fastmap
 *
 * The function returns a physical erase block with a given maximal number
 * and removes it from the wl subsystem.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
        struct ubi_wl_entry *e = NULL;

        if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
                goto out;

        if (anchor)
                e = find_anchor_wl_entry(&ubi->free);
        else
                e = find_mean_wl_entry(ubi, &ubi->free);

        if (!e)
                goto out;

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /* Remove it from the free list; the wl subsystem no longer
         * knows this erase block. */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
out:
        return e;
}
#endif

/**
 * __wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 */
static int __wl_get_peb(struct ubi_device *ubi)
{
        int err;
        struct ubi_wl_entry *e;

retry:
        if (!ubi->free.rb_node) {
                if (ubi->works_count == 0) {
                        ubi_err("no free eraseblocks");
                        ubi_assert(list_empty(&ubi->works));
                        return -ENOSPC;
                }

                err = produce_free_peb(ubi);
                if (err < 0)
                        return err;
                goto retry;
        }

        e = find_mean_wl_entry(ubi, &ubi->free);
        if (!e) {
                ubi_err("no free eraseblocks");
                return -ENOSPC;
        }

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Move the physical eraseblock to the protection queue where it will
         * be protected from being moved for some time.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
        dbg_wl("PEB %d EC %d", e->pnum, e->ec);
#ifndef CONFIG_MTD_UBI_FASTMAP
        /* We have to enqueue e only if fastmap is disabled; if fastmap is
         * enabled, prot_queue_add() will be called by ubi_wl_get_peb()
         * after removing e from the pool. */
        prot_queue_add(ubi, e);
#endif
        return e->pnum;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
        int i;
        struct ubi_wl_entry *e;

        for (i = pool->used; i < pool->size; i++) {
                e = ubi->lookuptbl[pool->pebs[i]];
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
        }
}

/**
 * refill_wl_pool - refills the fastmap pool used by the WL sub-system.
 * @ubi: UBI device description object
 */
static void refill_wl_pool(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;

        return_unused_pool_pebs(ubi, pool);

        for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
                if (!ubi->free.rb_node ||
                   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
                        break;

                e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
                self_check_in_wl_tree(ubi, e, &ubi->free);
                rb_erase(&e->u.rb, &ubi->free);
                ubi->free_count--;

                pool->pebs[pool->size] = e->pnum;
        }
        pool->used = 0;
}

/**
 * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
 * @ubi: UBI device description object
 */
static void refill_wl_user_pool(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_pool;

        return_unused_pool_pebs(ubi, pool);

        for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
                pool->pebs[pool->size] = __wl_get_peb(ubi);
                if (pool->pebs[pool->size] < 0)
                        break;
        }
        pool->used = 0;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
        spin_lock(&ubi->wl_lock);
        refill_wl_pool(ubi);
        refill_wl_user_pool(ubi);
        spin_unlock(&ubi->wl_lock);
}

/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
 * the fastmap pool.
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int ret;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

        if (!pool->size || !wl_pool->size || pool->used == pool->size ||
            wl_pool->used == wl_pool->size)
                ubi_update_fastmap(ubi);

        /* We did not get a single free PEB */
        if (!pool->size)
                ret = -ENOSPC;
        else {
                spin_lock(&ubi->wl_lock);
                ret = pool->pebs[pool->used++];
                prot_queue_add(ubi, ubi->lookuptbl[ret]);
                spin_unlock(&ubi->wl_lock);
        }

        return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        if (pool->used == pool->size || !pool->size) {
                /* We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible. */
                schedule_work(&ubi->fm_work);
                return NULL;
        } else {
                pnum = pool->pebs[pool->used++];
                return ubi->lookuptbl[pnum];
        }
}
#else
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
        ubi->free_count--;
        ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);

        return e;
}

int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int peb, err;

        spin_lock(&ubi->wl_lock);
        peb = __wl_get_peb(ubi);
        spin_unlock(&ubi->wl_lock);

        if (peb < 0)
                return peb;

        err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
                ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
                return err;
        }

        return peb;
}
#endif

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        e = ubi->lookuptbl[pnum];
        if (!e)
                return -ENODEV;

        if (self_check_in_pq(ubi, e))
                return -ENODEV;

        list_del(&e->u.list);
        dbg_wl("deleted PEB %d from the protection queue", e->pnum);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                      int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err("erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}
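
/*
 * Note that ubi_io_sync_erase() returns the number of erase operations it
 * actually performed (more than one when @torture is set), which is why
 * the new erase counter above is computed as 'ec += err' rather than
 * 'ec + 1'.
 */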

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;
        int count;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
repeat:
        count = 0;
        spin_lock(&ubi->wl_lock);
        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                dbg_wl("PEB %d EC %d protection over, move to used tree",
                        e->pnum, e->ec);

                list_del(&e->u.list);
                wl_tree_add(e, &ubi->used);
                if (count++ > 32) {
                        /*
                         * Let's be nice and avoid holding the spinlock for
                         * too long.
                         */
                        spin_unlock(&ubi->wl_lock);
                        cond_resched();
                        goto repeat;
                }
        }

        ubi->pq_head += 1;
        if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
                ubi->pq_head = 0;
        ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
        spin_unlock(&ubi->wl_lock);
}
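
/*
 * For example, assuming the usual %UBI_PROT_QUEUE_LEN of 10, a PEB queued
 * by prot_queue_add() sits one slot behind @pq_head and is therefore
 * released after roughly ten erase operations, whichever PEBs those
 * erases happen to target.
 */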

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;
        if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
                wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        down_read(&ubi->work_sem);
        __schedule_ubi_work(ubi, wrk);
        up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int cancel);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
        return wrk->func == erase_worker;
}
#endif

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        ubi_assert(e);
        ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        schedule_ubi_work(ubi, wl_wrk);
        return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                         int vol_id, int lnum, int torture)
{
        struct ubi_work *wl_wrk;

        dbg_wl("sync erase of PEB %i", e->pnum);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        return erase_worker(ubi, wl_wrk, 0);
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
        struct ubi_wl_entry *e;
        int vol_id, pnum = fm_e->pnum;

        dbg_wl("PEB %d", pnum);

        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];

        /* This can happen if we recovered from a fastmap the very
         * first time and are now writing a new one. In this case the wl
         * system has never seen any PEB used by the original fastmap.
         */
        if (!e) {
                e = fm_e;
                ubi_assert(e->ec >= 0);
                ubi->lookuptbl[pnum] = e;
        } else {
                e->ec = fm_e->ec;
                kfree(fm_e);
        }

        spin_unlock(&ubi->wl_lock);

        vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
        return schedule_erase(ubi, e, vol_id, lnum, torture);
}
#endif

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int cancel)
{
        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
        int vol_id = -1, uninitialized_var(lnum);
#ifdef CONFIG_MTD_UBI_FASTMAP
        int anchor = wrk->anchor;
#endif
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_hdr *vid_hdr;

        kfree(wrk);
        if (cancel)
                return 0;

        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;

        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

#ifdef CONFIG_MTD_UBI_FASTMAP
        /* Check whether we need to produce an anchor PEB */
        if (!anchor)
                anchor = !anchor_pebs_available(&ubi->free);

        if (anchor) {
                e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
        } else if (!ubi->scrub.rb_node) {
#else
        if (!ubi->scrub.rb_node) {
#endif
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ by enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);

                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
                        ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased. The
         * 'ubi_wl_put_peb()' would wait for the move to finish if the PEB
         * being moved were unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                if (err == UBI_IO_FF) {
                        /*
                         * We are trying to move a PEB without a VID header.
                         * UBI always writes VID headers shortly after the PEB
                         * was given out, so we have a situation when it has
                         * not yet had a chance to write it, because it was
                         * preempted. So add this PEB to the protection queue
                         * for now, because presumably more data will be
                         * written there (including the missing VID header),
                         * and then we'll move it.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        protect = 1;
                        goto out_not_moved;
                } else if (err == UBI_IO_FF_BITFLIPS) {
                        /*
                         * The same situation as %UBI_IO_FF, but bit-flips were
                         * detected. It is better to schedule this PEB for
                         * scrubbing.
                         */
                        dbg_wl("PEB %d has no VID header but has bit-flips",
                               e1->pnum);
                        scrubbing = 1;
                        goto out_not_moved;
                }

                ubi_err("error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                goto out_error;
        }

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
        if (err) {
                if (err == MOVE_CANCEL_RACE) {
                        /*
                         * The LEB has not been moved because the volume is
                         * being deleted or the PEB has been put meanwhile. We
                         * should prevent this PEB from being selected for
                         * wear-leveling movement again, so put it to the
                         * protection queue.
                         */
                        protect = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_RETRY) {
                        scrubbing = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
                         */
                        torture = 1;
                        goto out_not_moved;
                }

                if (err == MOVE_SOURCE_RD_ERR) {
                        /*
                         * An error happened while reading the source PEB. Do
                         * not switch to R/O mode in this case, and give the
                         * upper layers a possibility to recover from this,
                         * e.g. by unmapping corresponding LEB. Instead, just
                         * put this PEB to the @ubi->erroneous list to prevent
                         * UBI from trying to move it over and over again.
                         */
                        if (ubi->erroneous_peb_count > ubi->max_erroneous) {
                                ubi_err("too many erroneous eraseblocks (%d)",
                                        ubi->erroneous_peb_count);
                                goto out_error;
                        }
                        erroneous = 1;
                        goto out_not_moved;
                }

                if (err < 0)
                        goto out_error;

                ubi_assert(0);
        }

        /* The PEB has been successfully moved */
        if (scrubbing)
                ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
                        e1->pnum, vol_id, lnum, e2->pnum);
        ubi_free_vid_hdr(ubi, vid_hdr);

        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put) {
                wl_tree_add(e2, &ubi->used);
                e2 = NULL;
        }
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                kmem_cache_free(ubi_wl_entry_slab, e1);
                if (e2)
                        kmem_cache_free(ubi_wl_entry_slab, e2);
                goto out_ro;
        }

        if (e2) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
                if (err) {
                        kmem_cache_free(ubi_wl_entry_slab, e2);
                        goto out_ro;
                }
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        return 0;

        /*
         * For some reason the LEB was not moved: it might be an error, it
         * might be something else. @e1 was not changed, so return it back.
         * @e2 might have been changed, schedule it for erasure.
         */
out_not_moved:
        if (vol_id != -1)
                dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
                       e1->pnum, vol_id, lnum, e2->pnum, err);
        else
                dbg_wl("cancel moving PEB %d to PEB %d (%d)",
                       e1->pnum, e2->pnum, err);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_queue_add(ubi, e1);
        else if (erroneous) {
                wl_tree_add(e1, &ubi->erroneous);
                ubi->erroneous_peb_count += 1;
        } else if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else
                wl_tree_add(e1, &ubi->used);
        ubi_assert(!ubi->move_to_put);
        ubi->move_from = ubi->move_to = NULL;
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
        if (err) {
                kmem_cache_free(ubi_wl_entry_slab, e2);
                goto out_ro;
        }
        mutex_unlock(&ubi->move_mutex);
        return 0;

out_error:
        if (vol_id != -1)
                ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
                        err, e1->pnum, vol_id, lnum, e2->pnum);
        else
                ubi_err("error %d while moving PEB %d to PEB %d",
                        err, e1->pnum, e2->pnum);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_hdr(ubi, vid_hdr);
        kmem_cache_free(ubi_wl_entry_slab, e1);
        kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
        ubi_ro_mode(ubi);
        mutex_unlock(&ubi->move_mutex);
        ubi_assert(err != 0);
        return err < 0 ? err : -EIO;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return 0;
}
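
/*
 * Summary of the failure routing above: %MOVE_CANCEL_RACE re-protects the
 * source PEB (@protect), %MOVE_RETRY re-queues it for scrubbing,
 * %MOVE_TARGET_* errors torture the target PEB before erasing it, and
 * %MOVE_SOURCE_RD_ERR parks the source in @ubi->erroneous so it is not
 * selected again. Any other error is fatal and switches the device to
 * read-only mode.
 */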

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is at least
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->anchor = 0;
        wrk->func = &wear_leveling_worker;
        if (nested)
                __schedule_ubi_work(ubi, wrk);
        else
                schedule_ubi_work(ubi, wrk);
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                spin_lock(&ubi->wl_lock);
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return -ENOMEM;
        }

        wrk->anchor = 1;
        wrk->func = &wear_leveling_worker;
        schedule_ubi_work(ubi, wrk);
        return 0;
}
#endif

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int cancel)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
        int err, available_consumed = 0;

        if (cancel) {
                dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
                kfree(wl_wrk);
                kmem_cache_free(ubi_wl_entry_slab, e);
                return 0;
        }

        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

        ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                /* Fine, we've erased it successfully */
                kfree(wl_wrk);

                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of
                 * protected physical eraseblocks.
                 */
                serve_prot_queue(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi, 1);
                return err;
        }
1458
1459         ubi_err("failed to erase PEB %d, error %d", pnum, err);
1460         kfree(wl_wrk);
1461
1462         if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1463             err == -EBUSY) {
1464                 int err1;
1465
1466                 /* Re-schedule the LEB for erasure */
1467                 err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
1468                 if (err1) {
1469                         err = err1;
1470                         goto out_ro;
1471                 }
1472                 return err;
1473         }
1474
1475         kmem_cache_free(ubi_wl_entry_slab, e);
1476         if (err != -EIO)
1477                 /*
1478                  * If this is not %-EIO, we have no idea what to do. Scheduling
1479                  * this physical eraseblock for erasure again would cause
1480                  * errors again and again. Well, lets switch to R/O mode.
1481                  */
1482                 goto out_ro;
1483
1484         /* It is %-EIO, the PEB went bad */
1485
1486         if (!ubi->bad_allowed) {
1487                 ubi_err("bad physical eraseblock %d detected", pnum);
1488                 goto out_ro;
1489         }
1490
1491         spin_lock(&ubi->volumes_lock);
1492         if (ubi->beb_rsvd_pebs == 0) {
1493                 if (ubi->avail_pebs == 0) {
1494                         spin_unlock(&ubi->volumes_lock);
1495                         ubi_err("no reserved/available physical eraseblocks");
1496                         goto out_ro;
1497                 }
1498                 ubi->avail_pebs -= 1;
1499                 available_consumed = 1;
1500         }
1501         spin_unlock(&ubi->volumes_lock);
1502
1503         ubi_msg("mark PEB %d as bad", pnum);
1504         err = ubi_io_mark_bad(ubi, pnum);
1505         if (err)
1506                 goto out_ro;
1507
1508         spin_lock(&ubi->volumes_lock);
1509         if (ubi->beb_rsvd_pebs > 0) {
1510                 if (available_consumed) {
1511                         /*
1512                          * The amount of reserved PEBs increased since we last
1513                          * checked.
1514                          */
1515                         ubi->avail_pebs += 1;
1516                         available_consumed = 0;
1517                 }
1518                 ubi->beb_rsvd_pebs -= 1;
1519         }
1520         ubi->bad_peb_count += 1;
1521         ubi->good_peb_count -= 1;
1522         ubi_calculate_reserved(ubi);
1523         if (available_consumed)
1524                 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1525         else if (ubi->beb_rsvd_pebs)
1526                 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1527         else
1528                 ubi_warn("last PEB from the reserve was used");
1529         spin_unlock(&ubi->volumes_lock);
1530
1531         return err;
1532
1533 out_ro:
1534         if (available_consumed) {
1535                 spin_lock(&ubi->volumes_lock);
1536                 ubi->avail_pebs += 1;
1537                 spin_unlock(&ubi->volumes_lock);
1538         }
1539         ubi_ro_mode(ubi);
1540         return err;
1541 }
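/*
 * A condensed, illustrative sketch of how erase work reaches erase_worker().
 * 'schedule_erase()', defined earlier in this file, packages the entry into
 * a struct ubi_work and queues it for the background thread (this is not a
 * verbatim copy of that function):
 *
 *	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
 *	if (!wl_wrk)
 *		return -ENOMEM;
 *
 *	wl_wrk->func = &erase_worker;
 *	wl_wrk->e = e;
 *	wl_wrk->vol_id = vol_id;
 *	wl_wrk->lnum = lnum;
 *	wl_wrk->torture = torture;
 *
 *	schedule_ubi_work(ubi, wl_wrk);
 */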
1542
1543 /**
1544  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1545  * @ubi: UBI device description object
1546  * @vol_id: the volume ID that last used this PEB
1547  * @lnum: the last used logical eraseblock number for the PEB
1548  * @pnum: physical eraseblock to return
1549  * @torture: if this physical eraseblock has to be tortured
1550  *
1551  * This function is called to return physical eraseblock @pnum to the pool of
1552  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1553  * occurred on this PEB and it has to be tested. This function returns zero
1554  * in case of success, and a negative error code in case of failure.
1555  */
1556 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1557                    int pnum, int torture)
1558 {
1559         int err;
1560         struct ubi_wl_entry *e;
1561
1562         dbg_wl("PEB %d", pnum);
1563         ubi_assert(pnum >= 0);
1564         ubi_assert(pnum < ubi->peb_count);
1565
1566 retry:
1567         spin_lock(&ubi->wl_lock);
1568         e = ubi->lookuptbl[pnum];
1569         if (e == ubi->move_from) {
1570                 /*
1571                  * User is putting the physical eraseblock which was selected to
1572                  * be moved. It will be scheduled for erasure in the
1573                  * wear-leveling worker.
1574                  */
1575                 dbg_wl("PEB %d is being moved, wait", pnum);
1576                 spin_unlock(&ubi->wl_lock);
1577
1578                 /* Wait for the WL worker by taking the @ubi->move_mutex */
1579                 mutex_lock(&ubi->move_mutex);
1580                 mutex_unlock(&ubi->move_mutex);
1581                 goto retry;
1582         } else if (e == ubi->move_to) {
1583                 /*
1584                  * User is putting the physical eraseblock which was selected
1585                  * as the target of a data move. This may happen if the EBA
1586                  * sub-system has already re-mapped the LEB in 'ubi_eba_copy_leb()'
1587                  * but the WL sub-system has not yet put the PEB to the "used"
1588                  * tree and is about to do so. So we just set a flag which
1589                  * will tell the WL worker that the PEB is not needed anymore
1590                  * and should be scheduled for erasure.
1591                  */
1592                 dbg_wl("PEB %d is the target of data moving", pnum);
1593                 ubi_assert(!ubi->move_to_put);
1594                 ubi->move_to_put = 1;
1595                 spin_unlock(&ubi->wl_lock);
1596                 return 0;
1597         } else {
1598                 if (in_wl_tree(e, &ubi->used)) {
1599                         self_check_in_wl_tree(ubi, e, &ubi->used);
1600                         rb_erase(&e->u.rb, &ubi->used);
1601                 } else if (in_wl_tree(e, &ubi->scrub)) {
1602                         self_check_in_wl_tree(ubi, e, &ubi->scrub);
1603                         rb_erase(&e->u.rb, &ubi->scrub);
1604                 } else if (in_wl_tree(e, &ubi->erroneous)) {
1605                         self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1606                         rb_erase(&e->u.rb, &ubi->erroneous);
1607                         ubi->erroneous_peb_count -= 1;
1608                         ubi_assert(ubi->erroneous_peb_count >= 0);
1609                         /* Erroneous PEBs should be tortured */
1610                         torture = 1;
1611                 } else {
1612                         err = prot_queue_del(ubi, e->pnum);
1613                         if (err) {
1614                                 ubi_err("PEB %d not found", pnum);
1615                                 ubi_ro_mode(ubi);
1616                                 spin_unlock(&ubi->wl_lock);
1617                                 return err;
1618                         }
1619                 }
1620         }
1621         spin_unlock(&ubi->wl_lock);
1622
1623         err = schedule_erase(ubi, e, vol_id, lnum, torture);
1624         if (err) {
1625                 spin_lock(&ubi->wl_lock);
1626                 wl_tree_add(e, &ubi->used);
1627                 spin_unlock(&ubi->wl_lock);
1628         }
1629
1630         return err;
1631 }
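/*
 * Hypothetical caller sketch (the EBA sub-system is the real user): return
 * PEB 42, which last held LEB 3 of volume 0, without requesting torture
 * testing. All numbers are made up for illustration:
 *
 *	err = ubi_wl_put_peb(ubi, 0, 3, 42, 0);
 *	if (err)
 *		ubi_err("cannot put PEB 42, error %d", err);
 */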
1632
1633 /**
1634  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1635  * @ubi: UBI device description object
1636  * @pnum: the physical eraseblock to schedule
1637  *
1638  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1639  * needs scrubbing. This function schedules the physical eraseblock for
1640  * scrubbing, which is done in the background. It returns zero in case of
1641  * success and a negative error code in case of failure.
1642  */
1643 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1644 {
1645         struct ubi_wl_entry *e;
1646
1647         ubi_msg("schedule PEB %d for scrubbing", pnum);
1648
1649 retry:
1650         spin_lock(&ubi->wl_lock);
1651         e = ubi->lookuptbl[pnum];
1652         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1653                                    in_wl_tree(e, &ubi->erroneous)) {
1654                 spin_unlock(&ubi->wl_lock);
1655                 return 0;
1656         }
1657
1658         if (e == ubi->move_to) {
1659                 /*
1660                  * This physical eraseblock was used to move data to. The data
1661                  * was moved but the PEB was not yet inserted to the proper
1662                  * tree. We should just wait a little and let the WL worker
1663                  * proceed.
1664                  */
1665                 spin_unlock(&ubi->wl_lock);
1666                 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1667                 yield();
1668                 goto retry;
1669         }
1670
1671         if (in_wl_tree(e, &ubi->used)) {
1672                 self_check_in_wl_tree(ubi, e, &ubi->used);
1673                 rb_erase(&e->u.rb, &ubi->used);
1674         } else {
1675                 int err;
1676
1677                 err = prot_queue_del(ubi, e->pnum);
1678                 if (err) {
1679                         ubi_err("PEB %d not found", pnum);
1680                         ubi_ro_mode(ubi);
1681                         spin_unlock(&ubi->wl_lock);
1682                         return err;
1683                 }
1684         }
1685
1686         wl_tree_add(e, &ubi->scrub);
1687         spin_unlock(&ubi->wl_lock);
1688
1689         /*
1690          * Technically scrubbing is the same as wear-leveling, so it is done
1691          * by the WL worker.
1692          */
1693         return ensure_wear_leveling(ubi, 0);
1694 }
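/*
 * Typical trigger, sketched (the real call sites are in the EBA code): a
 * read which completes with %UBI_IO_BITFLIPS means the data was corrected
 * by ECC, so the PEB should be scrubbed before more bit-flips accumulate:
 *
 *	err = ubi_io_read_data(ubi, buf, pnum, offset, len);
 *	if (err == UBI_IO_BITFLIPS)
 *		err = ubi_wl_scrub_peb(ubi, pnum);
 */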
1695
1696 /**
1697  * ubi_wl_flush - flush all pending works.
1698  * @ubi: UBI device description object
1699  * @vol_id: the volume id to flush for
1700  * @lnum: the logical eraseblock number to flush for
1701  *
1702  * This function executes all pending works for a particular volume id /
1703  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1704  * acts as a wildcard for all of the corresponding volume numbers or logical
1705  * eraseblock numbers. It returns zero in case of success and a negative error
1706  * code in case of failure.
1707  */
1708 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1709 {
1710         int err = 0;
1711         int found = 1;
1712
1713         /*
1714          * Keep executing pending works which match the given volume ID /
1715          * LEB pair until no such work is left on the queue.
1716          */
1717         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1718                vol_id, lnum, ubi->works_count);
1719
1720         while (found) {
1721                 struct ubi_work *wrk, *tmp;
1722                 found = 0;
1723
1724                 down_read(&ubi->work_sem);
1725                 spin_lock(&ubi->wl_lock);
1726                 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1727                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1728                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1729                                 list_del(&wrk->list);
1730                                 ubi->works_count -= 1;
1731                                 ubi_assert(ubi->works_count >= 0);
1732                                 spin_unlock(&ubi->wl_lock);
1733
1734                                 err = wrk->func(ubi, wrk, 0);
1735                                 if (err) {
1736                                         up_read(&ubi->work_sem);
1737                                         return err;
1738                                 }
1739
1740                                 spin_lock(&ubi->wl_lock);
1741                                 found = 1;
1742                                 break;
1743                         }
1744                 }
1745                 spin_unlock(&ubi->wl_lock);
1746                 up_read(&ubi->work_sem);
1747         }
1748
1749         /*
1750          * Make sure all the works which have been done in parallel are
1751          * finished.
1752          */
1753         down_write(&ubi->work_sem);
1754         up_write(&ubi->work_sem);
1755
1756         return err;
1757 }
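/*
 * Usage sketch: %UBI_ALL acts as a wildcard in either position. For example,
 * to execute every pending work belonging to volume 3 (an arbitrary example
 * volume ID), whatever LEB it refers to:
 *
 *	err = ubi_wl_flush(ubi, 3, UBI_ALL);
 */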
1758
1759 /**
1760  * tree_destroy - destroy an RB-tree.
1761  * @root: the root of the tree to destroy
1762  */
1763 static void tree_destroy(struct rb_root *root)
1764 {
1765         struct rb_node *rb;
1766         struct ubi_wl_entry *e;
1767
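        /*
         * Free the tree bottom-up without recursion: descend to a leaf,
         * free it, and clear the link in its parent. No rebalancing is
         * needed since the whole tree is being destroyed anyway.
         */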
1768         rb = root->rb_node;
1769         while (rb) {
1770                 if (rb->rb_left)
1771                         rb = rb->rb_left;
1772                 else if (rb->rb_right)
1773                         rb = rb->rb_right;
1774                 else {
1775                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1776
1777                         rb = rb_parent(rb);
1778                         if (rb) {
1779                                 if (rb->rb_left == &e->u.rb)
1780                                         rb->rb_left = NULL;
1781                                 else
1782                                         rb->rb_right = NULL;
1783                         }
1784
1785                         kmem_cache_free(ubi_wl_entry_slab, e);
1786                 }
1787         }
1788 }
1789
1790 /**
1791  * ubi_thread - UBI background thread.
1792  * @u: the UBI device description object pointer
1793  */
1794 int ubi_thread(void *u)
1795 {
1796         int failures = 0;
1797         struct ubi_device *ubi = u;
1798
1799         ubi_msg("background thread \"%s\" started, PID %d",
1800                 ubi->bgt_name, task_pid_nr(current));
1801
1802         set_freezable();
1803         for (;;) {
1804                 int err;
1805
1806                 if (kthread_should_stop())
1807                         break;
1808
1809                 if (try_to_freeze())
1810                         continue;
1811
1812                 spin_lock(&ubi->wl_lock);
1813                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1814                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1815                         set_current_state(TASK_INTERRUPTIBLE);
1816                         spin_unlock(&ubi->wl_lock);
1817                         schedule();
1818                         continue;
1819                 }
1820                 spin_unlock(&ubi->wl_lock);
1821
1822                 err = do_work(ubi);
1823                 if (err) {
1824                         ubi_err("%s: work failed with error code %d",
1825                                 ubi->bgt_name, err);
1826                         if (failures++ > WL_MAX_FAILURES) {
1827                                 /*
1828                                  * Too many failures, disable the thread and
1829                                  * switch to read-only mode.
1830                                  */
1831                                 ubi_msg("%s: %d consecutive failures",
1832                                         ubi->bgt_name, WL_MAX_FAILURES);
1833                                 ubi_ro_mode(ubi);
1834                                 ubi->thread_enabled = 0;
1835                                 continue;
1836                         }
1837                 } else
1838                         failures = 0;
1839
1840                 cond_resched();
1841         }
1842
1843         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1844         return 0;
1845 }
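/*
 * For context, a slightly condensed sketch of how this thread is started
 * during attach (the real code lives in build.c):
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
 *	if (IS_ERR(ubi->bgt_thread))
 *		err = PTR_ERR(ubi->bgt_thread);
 *	...
 *	wake_up_process(ubi->bgt_thread);
 */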
1846
1847 /**
1848  * cancel_pending - cancel all pending works.
1849  * @ubi: UBI device description object
1850  */
1851 static void cancel_pending(struct ubi_device *ubi)
1852 {
1853         while (!list_empty(&ubi->works)) {
1854                 struct ubi_work *wrk;
1855
1856                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1857                 list_del(&wrk->list);
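                /* Pass cancel == 1: the worker must only free its resources */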
1858                 wrk->func(ubi, wrk, 1);
1859                 ubi->works_count -= 1;
1860                 ubi_assert(ubi->works_count >= 0);
1861         }
1862 }
1863
1864 /**
1865  * ubi_wl_init - initialize the WL sub-system using attaching information.
1866  * @ubi: UBI device description object
1867  * @ai: attaching information
1868  *
1869  * This function returns zero in case of success, and a negative error code in
1870  * case of failure.
1871  */
1872 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1873 {
1874         int err, i, reserved_pebs, found_pebs = 0;
1875         struct rb_node *rb1, *rb2;
1876         struct ubi_ainf_volume *av;
1877         struct ubi_ainf_peb *aeb, *tmp;
1878         struct ubi_wl_entry *e;
1879
1880         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1881         spin_lock_init(&ubi->wl_lock);
1882         mutex_init(&ubi->move_mutex);
1883         init_rwsem(&ubi->work_sem);
1884         ubi->max_ec = ai->max_ec;
1885         INIT_LIST_HEAD(&ubi->works);
1886 #ifdef CONFIG_MTD_UBI_FASTMAP
1887         INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1888 #endif
1889
1890         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1891
1892         err = -ENOMEM;
1893         ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1894         if (!ubi->lookuptbl)
1895                 return err;
1896
1897         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1898                 INIT_LIST_HEAD(&ubi->pq[i]);
1899         ubi->pq_head = 0;
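        /*
         * @pq is a rotating array of %UBI_PROT_QUEUE_LEN lists: entries
         * enter at @pq_head and are moved to the @ubi->used tree roughly
         * %UBI_PROT_QUEUE_LEN erase operations later by serve_prot_queue().
         */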
1900
1901         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1902                 cond_resched();
1903
1904                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1905                 if (!e)
1906                         goto out_free;
1907
1908                 e->pnum = aeb->pnum;
1909                 e->ec = aeb->ec;
1910                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1911                 ubi->lookuptbl[e->pnum] = e;
1912                 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1913                         kmem_cache_free(ubi_wl_entry_slab, e);
1914                         goto out_free;
1915                 }
1916
1917                 found_pebs++;
1918         }
1919
1920         ubi->free_count = 0;
1921         list_for_each_entry(aeb, &ai->free, u.list) {
1922                 cond_resched();
1923
1924                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1925                 if (!e)
1926                         goto out_free;
1927
1928                 e->pnum = aeb->pnum;
1929                 e->ec = aeb->ec;
1930                 ubi_assert(e->ec >= 0);
1931                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1932
1933                 wl_tree_add(e, &ubi->free);
1934                 ubi->free_count++;
1935
1936                 ubi->lookuptbl[e->pnum] = e;
1937
1938                 found_pebs++;
1939         }
1940
1941         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1942                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1943                         cond_resched();
1944
1945                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1946                         if (!e)
1947                                 goto out_free;
1948
1949                         e->pnum = aeb->pnum;
1950                         e->ec = aeb->ec;
1951                         ubi->lookuptbl[e->pnum] = e;
1952
1953                         if (!aeb->scrub) {
1954                                 dbg_wl("add PEB %d EC %d to the used tree",
1955                                        e->pnum, e->ec);
1956                                 wl_tree_add(e, &ubi->used);
1957                         } else {
1958                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1959                                        e->pnum, e->ec);
1960                                 wl_tree_add(e, &ubi->scrub);
1961                         }
1962
1963                         found_pebs++;
1964                 }
1965         }
1966
1967         dbg_wl("found %i PEBs", found_pebs);
1968
1969         if (ubi->fm)
1970                 ubi_assert(ubi->good_peb_count ==
1971                            found_pebs + ubi->fm->used_blocks);
1972         else
1973                 ubi_assert(ubi->good_peb_count == found_pebs);
1974
1975         reserved_pebs = WL_RESERVED_PEBS;
1976 #ifdef CONFIG_MTD_UBI_FASTMAP
1977         /* Reserve enough LEBs to store two fastmaps. */
1978         reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1979 #endif
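        /*
         * Worked example with made-up numbers: if @fm_size is 256KiB and
         * @leb_size is 128KiB, one fastmap spans two PEBs, so 2 * 2 == 4
         * PEBs are reserved here on top of %WL_RESERVED_PEBS.
         */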
1980
1981         if (ubi->avail_pebs < reserved_pebs) {
1982                 ubi_err("no enough physical eraseblocks (%d, need %d)",
1983                         ubi->avail_pebs, reserved_pebs);
1984                 if (ubi->corr_peb_count)
1985                         ubi_err("%d PEBs are corrupted and not used",
1986                                 ubi->corr_peb_count);
1987                 goto out_free;
1988         }
1989         ubi->avail_pebs -= reserved_pebs;
1990         ubi->rsvd_pebs += reserved_pebs;
1991
1992         /* Schedule wear-leveling if needed */
1993         err = ensure_wear_leveling(ubi, 0);
1994         if (err)
1995                 goto out_free;
1996
1997         return 0;
1998
1999 out_free:
2000         cancel_pending(ubi);
2001         tree_destroy(&ubi->used);
2002         tree_destroy(&ubi->free);
2003         tree_destroy(&ubi->scrub);
2004         kfree(ubi->lookuptbl);
2005         return err;
2006 }
2007
2008 /**
2009  * protection_queue_destroy - destroy the protection queue.
2010  * @ubi: UBI device description object
2011  */
2012 static void protection_queue_destroy(struct ubi_device *ubi)
2013 {
2014         int i;
2015         struct ubi_wl_entry *e, *tmp;
2016
2017         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2018                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2019                         list_del(&e->u.list);
2020                         kmem_cache_free(ubi_wl_entry_slab, e);
2021                 }
2022         }
2023 }
2024
2025 /**
2026  * ubi_wl_close - close the wear-leveling sub-system.
2027  * @ubi: UBI device description object
2028  */
2029 void ubi_wl_close(struct ubi_device *ubi)
2030 {
2031         dbg_wl("close the WL sub-system");
2032         cancel_pending(ubi);
2033         protection_queue_destroy(ubi);
2034         tree_destroy(&ubi->used);
2035         tree_destroy(&ubi->erroneous);
2036         tree_destroy(&ubi->free);
2037         tree_destroy(&ubi->scrub);
2038         kfree(ubi->lookuptbl);
2039 }
2040
2041 /**
2042  * self_check_ec - make sure that the erase counter of a PEB is correct.
2043  * @ubi: UBI device description object
2044  * @pnum: the physical eraseblock number to check
2045  * @ec: the erase counter to check
2046  *
2047  * This function returns zero if the erase counter of physical eraseblock @pnum
2048  * is equivalent to @ec, %1 if the self-check fails, and a negative error
2049  * code if an error occurred.
2050  */
2051 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2052 {
2053         int err;
2054         long long read_ec;
2055         struct ubi_ec_hdr *ec_hdr;
2056
2057         if (!ubi_dbg_chk_gen(ubi))
2058                 return 0;
2059
2060         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2061         if (!ec_hdr)
2062                 return -ENOMEM;
2063
2064         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2065         if (err && err != UBI_IO_BITFLIPS) {
2066                 /* The header does not have to exist */
2067                 err = 0;
2068                 goto out_free;
2069         }
2070
2071         read_ec = be64_to_cpu(ec_hdr->ec);
2072         if (ec != read_ec && read_ec - ec > 1) {
2073                 ubi_err("self-check failed for PEB %d", pnum);
2074                 ubi_err("read EC is %lld, should be %d", read_ec, ec);
2075                 dump_stack();
2076                 err = 1;
2077         } else
2078                 err = 0;
2079
2080 out_free:
2081         kfree(ec_hdr);
2082         return err;
2083 }
2084
2085 /**
2086  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2087  * @ubi: UBI device description object
2088  * @e: the wear-leveling entry to check
2089  * @root: the root of the tree
2090  *
2091  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2092  * is not.
2093  */
2094 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2095                                  struct ubi_wl_entry *e, struct rb_root *root)
2096 {
2097         if (!ubi_dbg_chk_gen(ubi))
2098                 return 0;
2099
2100         if (in_wl_tree(e, root))
2101                 return 0;
2102
2103         ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p",
2104                 e->pnum, e->ec, root);
2105         dump_stack();
2106         return -EINVAL;
2107 }
2108
2109 /**
2110  * self_check_in_pq - check if wear-leveling entry is in the protection queue.
2112  * @ubi: UBI device description object
2113  * @e: the wear-leveling entry to check
2114  *
2115  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2116  */
2117 static int self_check_in_pq(const struct ubi_device *ubi,
2118                             struct ubi_wl_entry *e)
2119 {
2120         struct ubi_wl_entry *p;
2121         int i;
2122
2123         if (!ubi_dbg_chk_gen(ubi))
2124                 return 0;
2125
2126         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
2127                 list_for_each_entry(p, &ubi->pq[i], u.list)
2128                         if (p == e)
2129                                 return 0;
2130
2131         ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
2132                 e->pnum, e->ec);
2133         dump_stack();
2134         return -EINVAL;
2135 }