/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were obtained by the 'ubi_wl_get_peb()' function, and free
 * physical eraseblocks are those that were put by the 'ubi_wl_put_peb()'
 * function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As mentioned above, for the WL sub-system all physical eraseblocks are
 * either "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree,
 * while used eraseblocks are kept in the @wl->used, @wl->erroneous, or
 * @wl->scrub RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
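
/*
 * For illustration only (not part of the driver): a minimal sketch of the
 * life cycle described above, using hypothetical names and modeling erase
 * counters as plain integers. A PEB handed out by 'ubi_wl_get_peb()' first
 * sits in the protection queue, becomes movable ("used") once the queue has
 * been served often enough, and goes back to "free" with a bumped erase
 * counter after 'ubi_wl_put_peb()' and the asynchronous erasure:
 *
 *	enum peb_state { PEB_FREE, PEB_PROTECTED, PEB_USED };
 *
 *	struct peb_model {
 *		int ec;			// erase counter
 *		enum peb_state state;
 *	};
 *
 *	static void model_get(struct peb_model *p)   { p->state = PEB_PROTECTED; }
 *	static void model_serve(struct peb_model *p) { p->state = PEB_USED; }
 *	static void model_put(struct peb_model *p)   { p->ec += 1; p->state = PEB_FREE; }
 */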

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation where the picked physical eraseblock is constantly erased after
 * the data is written to it. So, we have a constant which limits the highest
 * erase counter of the free physical eraseblock to pick. Namely, the WL
 * sub-system does not pick eraseblocks with erase counter greater than the
 * lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)

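/*
 * A worked example of the bound above (the numbers are illustrative; the
 * real value comes from CONFIG_MTD_UBI_WL_THRESHOLD): with a threshold of
 * 4096, WL_FREE_MAX_DIFF is 8192. If the least worn free PEB has EC 100,
 * only free PEBs with EC below 100 + 8192 = 8292 may be picked as
 * wear-leveling targets, so a single highly erased PEB cannot be selected
 * over and over again.
 */
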
/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

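/*
 * Example of the composite (EC, pnum) key used above: entries with
 * (EC, pnum) pairs (10, 7), (10, 3) and (12, 5) end up ordered as
 * (10, 3) < (10, 7) < (12, 5). The PEB number only breaks ties between
 * equal erase counters, so rb_first() always yields the least worn entry.
 */
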
/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes the semaphore in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

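/*
 * The locking pattern above in short: workers hold @ubi->work_sem for
 * reading while executing works, so any number of them may run in
 * parallel. Code that must observe a fully drained queue takes the
 * semaphore for writing, which waits for all readers, as done at the end
 * of 'ubi_wl_flush()' below:
 *
 *	down_write(&ubi->work_sem);	// all in-flight works finished here
 *	up_write(&ubi->work_sem);
 */
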
/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

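/*
 * A small worked example of the ring arithmetic above, assuming a queue
 * depth of %UBI_PROT_QUEUE_LEN == 10: with @pq_head == 0, a newly protected
 * PEB lands in pq[9], the slot logically "one behind" the head. Each erase
 * operation makes 'serve_prot_queue()' release the list at @pq_head and
 * advance the head, so that entry is released only after ten (global) erase
 * operations:
 *
 *	pq_tail = ubi->pq_head - 1;		// 0 - 1 == -1
 *	if (pq_tail < 0)
 *		pq_tail = UBI_PROT_QUEUE_LEN - 1;	// wraps to 9
 */
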
/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}

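/*
 * For instance, given free PEBs with erase counters {4, 5, 20, 100} and
 * @diff == 10, max is 4 + 10 = 14 and the walk above settles on the entry
 * with EC 5 - the largest erase counter still below the cap. With a large
 * @diff such as %WL_FREE_MAX_DIFF, the entry with EC 100 would be chosen.
 */
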
/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set to true if the caller already holds @ubi->work_sem in read
 * mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_available(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		dst_leb_clean = 1;
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given out, so we have a situation where it has
			 * not yet had a chance to write it, because it was
			 * preempted. So add this PEB to the protection queue
			 * for now, because presumably more data will be
			 * written there (including the missing VID header),
			 * and then we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * While a full scan would detect interrupted erasures
			 * at attach time, we can face them here when attached
			 * from a fastmap.
			 */
			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
			       e1->pnum);
			erase = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			keep = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping the corresponding LEB. Instead,
			 * just put this PEB to the @ubi->erroneous list to
			 * prevent UBI from trying to move it over and over
			 * again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			dst_leb_clean = 1;
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_buf(vidb);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

	/*
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else if (keep)
		wl_tree_add(e1, &ubi->used);
	if (dst_leb_clean) {
		wl_tree_add(e2, &ubi->free);
		ubi->free_count++;
	}

	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	if (dst_leb_clean) {
		ensure_wear_leveling(ubi, 1);
	} else {
		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
		if (err)
			goto out_ro;
	}

	if (erase) {
		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
		if (err)
			goto out_ro;
	}

	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_free_vid_buf(vidb);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
		if (err1) {
			wl_entry_destroy(ubi, e);
			err = err1;
			goto out_ro;
		}
		return err;
	}

	wl_entry_destroy(ubi, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err(ubi, "no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg(ubi, "mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The amount of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	int ret;

	if (shutdown) {
		struct ubi_wl_entry *e = wl_wrk->e;

		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	ret = __erase_worker(ubi, wl_wrk);
	kfree(wl_wrk);
	return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in background. This function returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
				   in_wl_tree(e, &ubi->erroneous)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		self_check_in_wl_tree(ubi, e, &ubi->used);
		rb_erase(&e->u.rb, &ubi->used);
	} else {
		int err;

		err = prot_queue_del(ubi, e->pnum);
		if (err) {
			ubi_err(ubi, "PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi, 0);
}

1359 /**
1360  * ubi_wl_flush - flush all pending works.
1361  * @ubi: UBI device description object
1362  * @vol_id: the volume id to flush for
1363  * @lnum: the logical eraseblock number to flush for
1364  *
1365  * This function executes all pending works for a particular volume id /
1366  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1367  * acts as a wildcard for all of the corresponding volume numbers or logical
1368  * eraseblock numbers. It returns zero in case of success and a negative error
1369  * code in case of failure.
1370  */
1371 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1372 {
1373         int err = 0;
1374         int found = 1;
1375
1376         /*
1377          * Erase while the pending works queue is not empty, but not more than
1378          * the number of currently pending works.
1379          */
1380         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1381                vol_id, lnum, ubi->works_count);
1382
1383         while (found) {
1384                 struct ubi_work *wrk, *tmp;
1385                 found = 0;
1386
1387                 down_read(&ubi->work_sem);
1388                 spin_lock(&ubi->wl_lock);
1389                 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1390                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1391                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1392                                 list_del(&wrk->list);
1393                                 ubi->works_count -= 1;
1394                                 ubi_assert(ubi->works_count >= 0);
1395                                 spin_unlock(&ubi->wl_lock);
1396
1397                                 err = wrk->func(ubi, wrk, 0);
1398                                 if (err) {
1399                                         up_read(&ubi->work_sem);
1400                                         return err;
1401                                 }
1402
1403                                 spin_lock(&ubi->wl_lock);
1404                                 found = 1;
1405                                 break;
1406                         }
1407                 }
1408                 spin_unlock(&ubi->wl_lock);
1409                 up_read(&ubi->work_sem);
1410         }
1411
1412         /*
1413          * Make sure all the works which have been done in parallel are
1414          * finished.
1415          */
1416         down_write(&ubi->work_sem);
1417         up_write(&ubi->work_sem);
1418
1419         return err;
1420 }
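
/*
 * Usage sketch (illustrative): a caller that has just unmapped a LEB can
 * make sure the corresponding erasure has actually completed before
 * returning to the user:
 *
 *	err = ubi_eba_unmap_leb(ubi, vol, lnum);
 *	if (!err)
 *		err = ubi_wl_flush(ubi, vol->vol_id, lnum);
 */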
1421
1422 /**
1423  * tree_destroy - destroy an RB-tree.
1424  * @ubi: UBI device description object
1425  * @root: the root of the tree to destroy
1426  */
1427 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1428 {
1429         struct rb_node *rb;
1430         struct ubi_wl_entry *e;
1431
1432         rb = root->rb_node;
1433         while (rb) {
1434                 if (rb->rb_left)
1435                         rb = rb->rb_left;
1436                 else if (rb->rb_right)
1437                         rb = rb->rb_right;
1438                 else {
1439                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1440
1441                         rb = rb_parent(rb);
1442                         if (rb) {
1443                                 if (rb->rb_left == &e->u.rb)
1444                                         rb->rb_left = NULL;
1445                                 else
1446                                         rb->rb_right = NULL;
1447                         }
1448
1449                         wl_entry_destroy(ubi, e);
1450                 }
1451         }
1452 }
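
/*
 * Note: the loop above is a hand-rolled post-order traversal which NULLs
 * out child pointers so that no node is visited twice. A sketch of an
 * equivalent traversal using the generic helper from <linux/rbtree.h>:
 *
 *	struct ubi_wl_entry *e, *tmp;
 *
 *	rbtree_postorder_for_each_entry_safe(e, tmp, root, u.rb)
 *		wl_entry_destroy(ubi, e);
 *	*root = RB_ROOT;
 */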
1453
1454 /**
1455  * ubi_thread - UBI background thread.
1456  * @u: the UBI device description object pointer
1457  */
1458 int ubi_thread(void *u)
1459 {
1460         int failures = 0;
1461         struct ubi_device *ubi = u;
1462
1463         ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1464                 ubi->bgt_name, task_pid_nr(current));
1465
1466         set_freezable();
1467         for (;;) {
1468                 int err;
1469
1470                 if (kthread_should_stop())
1471                         break;
1472
1473                 if (try_to_freeze())
1474                         continue;
1475
1476                 spin_lock(&ubi->wl_lock);
1477                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1478                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1479                         set_current_state(TASK_INTERRUPTIBLE);
1480                         spin_unlock(&ubi->wl_lock);
1481                         schedule();
1482                         continue;
1483                 }
1484                 spin_unlock(&ubi->wl_lock);
1485
1486                 err = do_work(ubi);
1487                 if (err) {
1488                         ubi_err(ubi, "%s: work failed with error code %d",
1489                                 ubi->bgt_name, err);
1490                         if (failures++ > WL_MAX_FAILURES) {
1491                                 /*
1492                                  * Too many failures, disable the thread and
1493                                  * switch to read-only mode.
1494                                  */
1495                                 ubi_msg(ubi, "%s: %d consecutive failures",
1496                                         ubi->bgt_name, WL_MAX_FAILURES);
1497                                 ubi_ro_mode(ubi);
1498                                 ubi->thread_enabled = 0;
1499                                 continue;
1500                         }
1501                 } else
1502                         failures = 0;
1503
1504                 cond_resched();
1505         }
1506
1507         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1508         return 0;
1509 }
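
/*
 * Lifecycle sketch (illustrative; the real calls live in the UBI attach
 * and detach paths): one background thread exists per UBI device. It is
 * created during attach, woken once attaching is complete, and stopped
 * on detach:
 *
 *	ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
 *	wake_up_process(ubi->bgt_thread);
 *	...
 *	kthread_stop(ubi->bgt_thread);
 */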
1510
1511 /**
1512  * shutdown_work - shut down all pending works.
1513  * @ubi: UBI device description object
1514  */
1515 static void shutdown_work(struct ubi_device *ubi)
1516 {
1517 #ifdef CONFIG_MTD_UBI_FASTMAP
1518         flush_work(&ubi->fm_work);
1519 #endif
1520         while (!list_empty(&ubi->works)) {
1521                 struct ubi_work *wrk;
1522
1523                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1524                 list_del(&wrk->list);
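                /*
                 * The non-zero last argument asks the work to cancel
                 * itself: release its resources without performing the
                 * actual I/O.
                 */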
1525                 wrk->func(ubi, wrk, 1);
1526                 ubi->works_count -= 1;
1527                 ubi_assert(ubi->works_count >= 0);
1528         }
1529 }
1530
1531 /**
1532  * erase_aeb - erase a PEB described by a UBI attach info entry
1533  * @ubi: UBI device description object
1534  * @aeb: UBI attach info entry describing the PEB
1535  * @sync: if %true, erase the PEB synchronously; otherwise, schedule it for erasure
1536  */
1537 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1538 {
1539         struct ubi_wl_entry *e;
1540         int err;
1541
1542         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1543         if (!e)
1544                 return -ENOMEM;
1545
1546         e->pnum = aeb->pnum;
1547         e->ec = aeb->ec;
1548         ubi->lookuptbl[e->pnum] = e;
1549
1550         if (sync) {
1551                 err = sync_erase(ubi, e, false);
1552                 if (err)
1553                         goto out_free;
1554
1555                 wl_tree_add(e, &ubi->free);
1556                 ubi->free_count++;
1557         } else {
1558                 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1559                 if (err)
1560                         goto out_free;
1561         }
1562
1563         return 0;
1564
1565 out_free:
1566         wl_entry_destroy(ubi, e);
1567
1568         return err;
1569 }
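
/*
 * Usage sketch: both flavours appear in ubi_wl_init() below. Ordinary
 * PEBs found during attach are queued for the background thread, while
 * outdated fastmap anchor PEBs are erased synchronously:
 *
 *	err = erase_aeb(ubi, aeb, false);	- asynchronous, common case
 *	err = erase_aeb(ubi, aeb, true);	- synchronous, fastmap anchor
 */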
1570
1571 /**
1572  * ubi_wl_init - initialize the WL sub-system using attaching information.
1573  * @ubi: UBI device description object
1574  * @ai: attaching information
1575  *
1576  * This function returns zero in case of success, and a negative error code in
1577  * case of failure.
1578  */
1579 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1580 {
1581         int err, i, reserved_pebs, found_pebs = 0;
1582         struct rb_node *rb1, *rb2;
1583         struct ubi_ainf_volume *av;
1584         struct ubi_ainf_peb *aeb, *tmp;
1585         struct ubi_wl_entry *e;
1586
1587         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1588         spin_lock_init(&ubi->wl_lock);
1589         mutex_init(&ubi->move_mutex);
1590         init_rwsem(&ubi->work_sem);
1591         ubi->max_ec = ai->max_ec;
1592         INIT_LIST_HEAD(&ubi->works);
1593
1594         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1595
1596         err = -ENOMEM;
1597         ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1598         if (!ubi->lookuptbl)
1599                 return err;
1600
1601         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1602                 INIT_LIST_HEAD(&ubi->pq[i]);
1603         ubi->pq_head = 0;
1604
1605         ubi->free_count = 0;
1606         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1607                 cond_resched();
1608
1609                 err = erase_aeb(ubi, aeb, false);
1610                 if (err)
1611                         goto out_free;
1612
1613                 found_pebs++;
1614         }
1615
1616         list_for_each_entry(aeb, &ai->free, u.list) {
1617                 cond_resched();
1618
1619                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1620                 if (!e) {
1621                         err = -ENOMEM;
1622                         goto out_free;
1623                 }
1624
1625                 e->pnum = aeb->pnum;
1626                 e->ec = aeb->ec;
1627                 ubi_assert(e->ec >= 0);
1628
1629                 wl_tree_add(e, &ubi->free);
1630                 ubi->free_count++;
1631
1632                 ubi->lookuptbl[e->pnum] = e;
1633
1634                 found_pebs++;
1635         }
1636
1637         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1638                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1639                         cond_resched();
1640
1641                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1642                         if (!e) {
1643                                 err = -ENOMEM;
1644                                 goto out_free;
1645                         }
1646
1647                         e->pnum = aeb->pnum;
1648                         e->ec = aeb->ec;
1649                         ubi->lookuptbl[e->pnum] = e;
1650
1651                         if (!aeb->scrub) {
1652                                 dbg_wl("add PEB %d EC %d to the used tree",
1653                                        e->pnum, e->ec);
1654                                 wl_tree_add(e, &ubi->used);
1655                         } else {
1656                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1657                                        e->pnum, e->ec);
1658                                 wl_tree_add(e, &ubi->scrub);
1659                         }
1660
1661                         found_pebs++;
1662                 }
1663         }
1664
1665         list_for_each_entry(aeb, &ai->fastmap, u.list) {
1666                 cond_resched();
1667
1668                 e = ubi_find_fm_block(ubi, aeb->pnum);
1669
1670                 if (e) {
1671                         ubi_assert(!ubi->lookuptbl[e->pnum]);
1672                         ubi->lookuptbl[e->pnum] = e;
1673                 } else {
1674                         bool sync = false;
1675
1676                         /*
1677                          * Usually old fastmap PEBs are scheduled for erasure
1678                          * and we don't have to care about them, but if a
1679                          * power cut happened before they could be scheduled,
1680                          * we have to take care of them here.
1681                          */
1682                         if (ubi->lookuptbl[aeb->pnum])
1683                                 continue;
1684
1685                         /*
1686                          * The fastmap update code might not find a free PEB to
1687                          * write the fastmap anchor to and then reuses the
1688                          * current fastmap anchor PEB. If this PEB gets erased
1689                          * and a power cut happens before it is written again,
1690                          * we must make sure that the fastmap attach code does
1691                          * not find any outdated fastmap anchors; hence we erase
1692                          * the outdated fastmap anchor PEBs synchronously here.
1693                          */
1694                         if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1695                                 sync = true;
1696
1697                         err = erase_aeb(ubi, aeb, sync);
1698                         if (err)
1699                                 goto out_free;
1700                 }
1701
1702                 found_pebs++;
1703         }
1704
1705         dbg_wl("found %i PEBs", found_pebs);
1706
1707         ubi_assert(ubi->good_peb_count == found_pebs);
1708
1709         reserved_pebs = WL_RESERVED_PEBS;
1710         ubi_fastmap_init(ubi, &reserved_pebs);
1711
1712         if (ubi->avail_pebs < reserved_pebs) {
1713                 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1714                         ubi->avail_pebs, reserved_pebs);
1715                 if (ubi->corr_peb_count)
1716                         ubi_err(ubi, "%d PEBs are corrupted and not used",
1717                                 ubi->corr_peb_count);
1718                 err = -ENOSPC;
1719                 goto out_free;
1720         }
1721         ubi->avail_pebs -= reserved_pebs;
1722         ubi->rsvd_pebs += reserved_pebs;
1723
1724         /* Schedule wear-leveling if needed */
1725         err = ensure_wear_leveling(ubi, 0);
1726         if (err)
1727                 goto out_free;
1728
1729         return 0;
1730
1731 out_free:
1732         shutdown_work(ubi);
1733         tree_destroy(ubi, &ubi->used);
1734         tree_destroy(ubi, &ubi->free);
1735         tree_destroy(ubi, &ubi->scrub);
1736         kfree(ubi->lookuptbl);
1737         return err;
1738 }
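
/*
 * Accounting sketch for the reservation above (assuming WL_RESERVED_PEBS
 * is 1 and fastmap is compiled out, so ubi_fastmap_init() adds nothing):
 * a device that enters this function with avail_pebs = 100 leaves it
 * with avail_pebs = 99 and rsvd_pebs grown by 1. With fastmap enabled,
 * ubi_fastmap_init() increases reserved_pebs further before the check.
 */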
1739
1740 /**
1741  * protection_queue_destroy - destroy the protection queue.
1742  * @ubi: UBI device description object
1743  */
1744 static void protection_queue_destroy(struct ubi_device *ubi)
1745 {
1746         int i;
1747         struct ubi_wl_entry *e, *tmp;
1748
1749         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1750                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1751                         list_del(&e->u.list);
1752                         wl_entry_destroy(ubi, e);
1753                 }
1754         }
1755 }
1756
1757 /**
1758  * ubi_wl_close - close the wear-leveling sub-system.
1759  * @ubi: UBI device description object
1760  */
1761 void ubi_wl_close(struct ubi_device *ubi)
1762 {
1763         dbg_wl("close the WL sub-system");
1764         ubi_fastmap_close(ubi);
1765         shutdown_work(ubi);
1766         protection_queue_destroy(ubi);
1767         tree_destroy(ubi, &ubi->used);
1768         tree_destroy(ubi, &ubi->erroneous);
1769         tree_destroy(ubi, &ubi->free);
1770         tree_destroy(ubi, &ubi->scrub);
1771         kfree(ubi->lookuptbl);
1772 }
1773
1774 /**
1775  * self_check_ec - make sure that the erase counter of a PEB is correct.
1776  * @ubi: UBI device description object
1777  * @pnum: the physical eraseblock number to check
1778  * @ec: the erase counter to check
1779  *
1780  * This function returns zero if the stored erase counter of physical
1781  * eraseblock @pnum is consistent with @ec, %1 if the self-check fails, and a
1782  * negative error code in case of an internal error.
1783  */
1784 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1785 {
1786         int err;
1787         long long read_ec;
1788         struct ubi_ec_hdr *ec_hdr;
1789
1790         if (!ubi_dbg_chk_gen(ubi))
1791                 return 0;
1792
1793         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1794         if (!ec_hdr)
1795                 return -ENOMEM;
1796
1797         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1798         if (err && err != UBI_IO_BITFLIPS) {
1799                 /* The header does not have to exist */
1800                 err = 0;
1801                 goto out_free;
1802         }
1803
1804         read_ec = be64_to_cpu(ec_hdr->ec);
1805         if (ec != read_ec && read_ec - ec > 1) {
1806                 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1807                 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1808                 dump_stack();
1809                 err = 1;
1810         } else
1811                 err = 0;
1812
1813 out_free:
1814         kfree(ec_hdr);
1815         return err;
1816 }
1817
1818 /**
1819  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1820  * @ubi: UBI device description object
1821  * @e: the wear-leveling entry to check
1822  * @root: the root of the tree
1823  *
1824  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
1825  * is not.
1826  */
1827 static int self_check_in_wl_tree(const struct ubi_device *ubi,
1828                                  struct ubi_wl_entry *e, struct rb_root *root)
1829 {
1830         if (!ubi_dbg_chk_gen(ubi))
1831                 return 0;
1832
1833         if (in_wl_tree(e, root))
1834                 return 0;
1835
1836         ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
1837                 e->pnum, e->ec, root);
1838         dump_stack();
1839         return -EINVAL;
1840 }
1841
1842 /**
1843  * self_check_in_pq - check if wear-leveling entry is in the protection queue.
1845  * @ubi: UBI device description object
1846  * @e: the wear-leveling entry to check
1847  *
1848  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
1849  */
1850 static int self_check_in_pq(const struct ubi_device *ubi,
1851                             struct ubi_wl_entry *e)
1852 {
1853         struct ubi_wl_entry *p;
1854         int i;
1855
1856         if (!ubi_dbg_chk_gen(ubi))
1857                 return 0;
1858
1859         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
1860                 list_for_each_entry(p, &ubi->pq[i], u.list)
1861                         if (p == e)
1862                                 return 0;
1863
1864         ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
1865                 e->pnum, e->ec);
1866         dump_stack();
1867         return -EINVAL;
1868 }
1869 #ifndef CONFIG_MTD_UBI_FASTMAP
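/**
 * get_peb_for_wl - pick a free physical eraseblock for wear-leveling.
 * @ubi: UBI device description object
 *
 * Picks a free PEB whose erase counter does not exceed the lowest free
 * erase counter by more than %WL_FREE_MAX_DIFF and removes it from the
 * @ubi->free tree. Must be called with @ubi->wl_lock held.
 */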
1870 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
1871 {
1872         struct ubi_wl_entry *e;
1873
1874         e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
1875         self_check_in_wl_tree(ubi, e, &ubi->free);
1876         ubi->free_count--;
1877         ubi_assert(ubi->free_count >= 0);
1878         rb_erase(&e->u.rb, &ubi->free);
1879
1880         return e;
1881 }
1882
1883 /**
1884  * produce_free_peb - produce a free physical eraseblock.
1885  * @ubi: UBI device description object
1886  *
1887  * This function tries to make a free PEB by means of synchronous execution of
1888  * pending works, which may be needed if, e.g., the background thread is
1889  * disabled. Must be called with @ubi->wl_lock held. Returns zero in case of
1890  * success and a negative error code in case of failure.
1891  */
1892 static int produce_free_peb(struct ubi_device *ubi)
1893 {
1894         int err;
1895
1896         while (!ubi->free.rb_node && ubi->works_count) {
1897                 spin_unlock(&ubi->wl_lock);
1898
1899                 dbg_wl("do one work synchronously");
1900                 err = do_work(ubi);
1901
1902                 spin_lock(&ubi->wl_lock);
1903                 if (err)
1904                         return err;
1905         }
1906
1907         return 0;
1908 }
1909
1910 /**
1911  * ubi_wl_get_peb - get a physical eraseblock.
1912  * @ubi: UBI device description object
1913  *
1914  * This function returns a physical eraseblock number in case of success and a
1915  * negative error code in case of failure.
1916  * Note: it returns with @ubi->fm_eba_sem held in read mode in both cases!
1917  */
1918 int ubi_wl_get_peb(struct ubi_device *ubi)
1919 {
1920         int err;
1921         struct ubi_wl_entry *e;
1922
1923 retry:
1924         down_read(&ubi->fm_eba_sem);
1925         spin_lock(&ubi->wl_lock);
1926         if (!ubi->free.rb_node) {
1927                 if (ubi->works_count == 0) {
1928                         ubi_err(ubi, "no free eraseblocks");
1929                         ubi_assert(list_empty(&ubi->works));
1930                         spin_unlock(&ubi->wl_lock);
1931                         return -ENOSPC;
1932                 }
1933
1934                 err = produce_free_peb(ubi);
1935                 if (err < 0) {
1936                         spin_unlock(&ubi->wl_lock);
1937                         return err;
1938                 }
1939                 spin_unlock(&ubi->wl_lock);
1940                 up_read(&ubi->fm_eba_sem);
1941                 goto retry;
1943         }
1944         e = wl_get_wle(ubi);
1945         prot_queue_add(ubi, e);
1946         spin_unlock(&ubi->wl_lock);
1947
1948         err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
1949                                     ubi->peb_size - ubi->vid_hdr_aloffset);
1950         if (err) {
1951                 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
1952                 return err;
1953         }
1954
1955         return e->pnum;
1956 }
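
/*
 * Caller-side sketch (illustrative): whatever the outcome, the caller is
 * responsible for releasing @ubi->fm_eba_sem once it is done with the
 * PEB (or with the error):
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... write to the PEB ...
 *	up_read(&ubi->fm_eba_sem);
 */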
1957 #else
1958 #include "fastmap-wl.c"
1959 #endif