/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is
 * detected in a physical eraseblock, it has to be moved. Technically this is
 * the same as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in the @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to
 * the @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the protection queue. Eraseblocks are put to the tail of this queue when
 * they are returned by 'ubi_wl_get_peb()', and eraseblocks are removed from
 * the head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each
 * physical eraseblock. This is surely not a scalable solution. But it appears
 * to be good enough for moderately large flashes and it is simple. In the
 * future, one may re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is.
 * For example, when we move a PEB with a low erase counter, and we need to
 * pick the target PEB, we pick a PEB with the highest EC if our PEB is "old"
 * and we pick a target PEB with an average EC if our PEB is not very "old".
 * This is room for future re-work of the WL sub-system.
 */
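
/*
 * A worked example of the protection-queue timing (a sketch, assuming the
 * %UBI_PROT_QUEUE_LEN value of 10 defined in ubi.h): a PEB handed out by
 * 'ubi_wl_get_peb()' is appended at the tail of @wl->pq, and every erase
 * operation anywhere on the device advances the queue head by one slot.
 * The PEB therefore becomes eligible for WL movement again only after about
 * ten further (global) erase operations, when 'serve_prot_queue()' migrates
 * it from the queue to the @wl->used tree.
 */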

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the
 * target physical eraseblock to move to. The simplest way would be just to
 * pick the one with the highest erase counter. But in certain workloads this
 * could lead to unlimited wear of one or a few physical eraseblocks. Indeed,
 * imagine a situation when the picked physical eraseblock is constantly
 * erased after the data is written to it. So, we have a constant which
 * limits the highest erase counter of the free physical eraseblock to pick.
 * Namely, the WL sub-system does not pick eraseblocks with erase counter
 * greater than the lowest erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
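
/*
 * For illustration (a sketch, assuming the Kconfig default
 * CONFIG_MTD_UBI_WL_THRESHOLD of 4096): WL_FREE_MAX_DIFF is 2 * 4096 = 8192,
 * so if the least worn free PEB has an erase counter of 100, free PEBs with
 * erase counters of 8292 and above are never picked as move targets.
 */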

/*
 * Maximum number of consecutive background thread failures which is enough
 * to switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
	struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
	ubi_update_fastmap(ubi);
}

/**
 * ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
 * @ubi: UBI device description object
 * @pnum: the PEB to check
 */
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	int i;

	if (!ubi->fm)
		return 0;

	for (i = 0; i < ubi->fm->used_blocks; i++)
		if (ubi->fm->e[i]->pnum == pnum)
			return 1;

	return 0;
}
#else
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
	return 0;
}
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);

		spin_lock(&ubi->wl_lock);
		if (err)
			return err;
	}

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
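
/*
 * For example (assuming the %UBI_PROT_QUEUE_LEN value of 10 from ubi.h): if
 * @ubi->pq_head is 0, pq_tail wraps around to 9, so the entry lands in the
 * slot that 'serve_prot_queue()' will reach last - i.e. the PEB stays
 * protected for roughly ten further erase operations.
 */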

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *prev_e = NULL;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			prev_e = e;
			e = e1;
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}
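
/*
 * A small worked example of the search above (hypothetical erase counters):
 * for a tree holding entries with ECs {100, 150, 300, 9000} and a @diff of
 * 8192, max is 100 + 8192 = 8292, and the loop ends on the EC 300 entry -
 * the most worn-out PEB that still respects the %WL_FREE_MAX_DIFF bound.
 */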

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree to search
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but no greater than the lowest erase counter plus %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

#ifdef CONFIG_MTD_UBI_FASTMAP
		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		if (e && !ubi->fm_disabled && !ubi->fm &&
		    e->pnum < UBI_FM_MAX_START)
			e = rb_entry(rb_next(root->rb_node),
				     struct ubi_wl_entry, u.rb);
#endif
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e, *victim = NULL;
	int max_ec = UBI_MAX_ERASECOUNTER;

	ubi_rb_for_each_entry(p, e, root, u.rb) {
		if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
			victim = e;
			max_ec = e->ec;
		}
	}

	return victim;
}

static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap use.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as anchor PEB by fastmap
 *
 * This function returns a free physical eraseblock (the anchor PEB if
 * @anchor is non-zero, otherwise one with a medium erase counter) and
 * removes it from the wl subsystem.
 * Must be called with @ubi->wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
	struct ubi_wl_entry *e = NULL;

	if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
		goto out;

	if (anchor)
		e = find_anchor_wl_entry(&ubi->free);
	else
		e = find_mean_wl_entry(ubi, &ubi->free);

	if (!e)
		goto out;

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/* remove it from the free list,
	 * the wl subsystem no longer knows this erase block */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
out:
	return e;
}
#endif

/**
 * __wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 */
static int __wl_get_peb(struct ubi_device *ubi)
{
	int err;
	struct ubi_wl_entry *e;

retry:
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_err("no free eraseblocks");
			ubi_assert(list_empty(&ubi->works));
			return -ENOSPC;
		}

		err = produce_free_peb(ubi);
		if (err < 0)
			return err;
		goto retry;
	}

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err("no free eraseblocks");
		return -ENOSPC;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);
#ifndef CONFIG_MTD_UBI_FASTMAP
	/* We have to enqueue e only if fastmap is disabled; if fastmap is
	 * enabled, prot_queue_add() will be called by ubi_wl_get_peb()
	 * after removing e from the pool. */
	prot_queue_add(ubi, e);
#endif
	return e->pnum;
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * return_unused_pool_pebs - returns unused PEBs to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
				    struct ubi_fm_pool *pool)
{
	int i;
	struct ubi_wl_entry *e;

	for (i = pool->used; i < pool->size; i++) {
		e = ubi->lookuptbl[pool->pebs[i]];
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
	}
}

/**
 * refill_wl_pool - refills the fastmap pool used by the WL sub-system.
 * @ubi: UBI device description object
 */
static void refill_wl_pool(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;

	return_unused_pool_pebs(ubi, pool);

	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
		if (!ubi->free.rb_node ||
		   (ubi->free_count - ubi->beb_rsvd_pebs < 5))
			break;

		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
		self_check_in_wl_tree(ubi, e, &ubi->free);
		rb_erase(&e->u.rb, &ubi->free);
		ubi->free_count--;

		pool->pebs[pool->size] = e->pnum;
	}
	pool->used = 0;
}

/**
 * refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb.
 * @ubi: UBI device description object
 */
static void refill_wl_user_pool(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_pool;

	return_unused_pool_pebs(ubi, pool);

	for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
		if (!ubi->free.rb_node ||
		   (ubi->free_count - ubi->beb_rsvd_pebs < 1))
			break;

		pool->pebs[pool->size] = __wl_get_peb(ubi);
		if (pool->pebs[pool->size] < 0)
			break;
	}
	pool->used = 0;
}

/**
 * ubi_refill_pools - refills all fastmap PEB pools.
 * @ubi: UBI device description object
 */
void ubi_refill_pools(struct ubi_device *ubi)
{
	spin_lock(&ubi->wl_lock);
	refill_wl_pool(ubi);
	refill_wl_user_pool(ubi);
	spin_unlock(&ubi->wl_lock);
}

/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
 * the fastmap pool.
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fm_pool *pool = &ubi->fm_pool;
	struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

	if (!pool->size || !wl_pool->size || pool->used == pool->size ||
	    wl_pool->used == wl_pool->size)
		ubi_update_fastmap(ubi);

	/* we have no free PEBs at all */
	if (!pool->size)
		ret = -ENOSPC;
	else {
		spin_lock(&ubi->wl_lock);
		ret = pool->pebs[pool->used++];
		prot_queue_add(ubi, ubi->lookuptbl[ret]);
		spin_unlock(&ubi->wl_lock);
	}

	return ret;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
	int pnum;

	if (pool->used == pool->size || !pool->size) {
		/* We cannot update the fastmap here because this
		 * function is called in atomic context.
		 * Let's fail here and refill/update it as soon as possible. */
		schedule_work(&ubi->fm_work);
		return NULL;
	} else {
		pnum = pool->pebs[pool->used++];
		return ubi->lookuptbl[pnum];
	}
}
#else
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
	self_check_in_wl_tree(ubi, e, &ubi->free);
	rb_erase(&e->u.rb, &ubi->free);

	return e;
}

int ubi_wl_get_peb(struct ubi_device *ubi)
{
	int peb, err;

	spin_lock(&ubi->wl_lock);
	peb = __wl_get_peb(ubi);
	spin_unlock(&ubi->wl_lock);

	/* __wl_get_peb() returns a negative error code on failure; do not
	 * run the self-check on it in that case */
	if (peb < 0)
		return peb;

	err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
				    ubi->peb_size - ubi->vid_hdr_aloffset);
	if (err) {
		ubi_err("new PEB %d does not contain all 0xFF bytes", peb);
		return err;
	}

	return peb;
}
#endif

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
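
/*
 * A note on the EC arithmetic above (a sketch of what sync_erase() relies
 * on): 'ubi_io_sync_erase()' returns the number of erase operations it
 * performed on success - 1 normally, more when @torture is set, because
 * torture testing erases the PEB several times - so adding its return value
 * to the old counter keeps the erase counter header accurate.
 */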

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from
 * the tail of the protection queue. These PEBs have been protected for long
 * enough and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
	return wrk->func == erase_worker;
}
#endif

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);
	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
}

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
		      int lnum, int torture)
{
	struct ubi_wl_entry *e;
	int vol_id, pnum = fm_e->pnum;

	dbg_wl("PEB %d", pnum);

	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];

	/* This can happen if we recovered from a fastmap the very
	 * first time and are now writing a new one. In this case the wl
	 * system has never seen any PEB used by the original fastmap.
	 */
	if (!e) {
		e = fm_e;
		ubi_assert(e->ec >= 0);
		ubi->lookuptbl[pnum] = e;
	} else {
		e->ec = fm_e->ec;
		kfree(fm_e);
	}

	spin_unlock(&ubi->wl_lock);

	vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
	return schedule_erase(ubi, e, vol_id, lnum, torture);
}
#endif

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int vol_id = -1, uninitialized_var(lnum);
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);
	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_available(&ubi->free);

	if (anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after the PEB
			 * was given, so we have a situation when it has not
			 * yet had a chance to write it, because it was
			 * preempted. So we put this PEB to the protection
			 * queue for now, because presumably more data will be
			 * written there (including the missing VID header),
			 * and then we'll move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err("too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_hdr(ubi, vid_hdr);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e1);
		if (e2)
			kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err) {
			kmem_cache_free(ubi_wl_entry_slab, e2);
			goto out_ro;
		}
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err) {
		kmem_cache_free(ubi_wl_entry_slab, e2);
		goto out_ro;
	}
	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err("error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_hdr(ubi, vid_hdr);
	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}
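
/*
 * Numeric illustration of the trigger above (a sketch, assuming the Kconfig
 * default UBI_WL_THRESHOLD of 4096): if the least worn used PEB has EC 100,
 * wear-leveling is scheduled only when 'find_wl_entry()' can return a free
 * PEB with EC 4196 or more; with a best candidate at EC 4000, the check
 * e2->ec - e1->ec >= UBI_WL_THRESHOLD fails and nothing is scheduled.
 */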

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}
	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		spin_lock(&ubi->wl_lock);
		ubi->wl_scheduled = 0;
		spin_unlock(&ubi->wl_lock);
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}
#endif

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	ubi_assert(!ubi_is_fm_block(ubi, e->pnum));

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of the
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	}
1472
1473         kmem_cache_free(ubi_wl_entry_slab, e);
1474         if (err != -EIO)
1475                 /*
1476                  * If this is not %-EIO, we have no idea what to do. Scheduling
1477                  * this physical eraseblock for erasure again would cause
1478                  * errors again and again. Well, lets switch to R/O mode.
1479                  */
1480                 goto out_ro;
1481
1482         /* It is %-EIO, the PEB went bad */
1483
1484         if (!ubi->bad_allowed) {
1485                 ubi_err("bad physical eraseblock %d detected", pnum);
1486                 goto out_ro;
1487         }
1488
1489         spin_lock(&ubi->volumes_lock);
1490         if (ubi->beb_rsvd_pebs == 0) {
1491                 if (ubi->avail_pebs == 0) {
1492                         spin_unlock(&ubi->volumes_lock);
1493                         ubi_err("no reserved/available physical eraseblocks");
1494                         goto out_ro;
1495                 }
1496                 ubi->avail_pebs -= 1;
1497                 available_consumed = 1;
1498         }
1499         spin_unlock(&ubi->volumes_lock);
1500
1501         ubi_msg("mark PEB %d as bad", pnum);
1502         err = ubi_io_mark_bad(ubi, pnum);
1503         if (err)
1504                 goto out_ro;
1505
1506         spin_lock(&ubi->volumes_lock);
1507         if (ubi->beb_rsvd_pebs > 0) {
1508                 if (available_consumed) {
1509                         /*
1510                          * The amount of reserved PEBs increased since we last
1511                          * checked.
1512                          */
1513                         ubi->avail_pebs += 1;
1514                         available_consumed = 0;
1515                 }
1516                 ubi->beb_rsvd_pebs -= 1;
1517         }
1518         ubi->bad_peb_count += 1;
1519         ubi->good_peb_count -= 1;
1520         ubi_calculate_reserved(ubi);
1521         if (available_consumed)
1522                 ubi_warn("no PEBs in the reserved pool, used an available PEB");
1523         else if (ubi->beb_rsvd_pebs)
1524                 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs);
1525         else
1526                 ubi_warn("last PEB from the reserve was used");
1527         spin_unlock(&ubi->volumes_lock);
1528
1529         return err;
1530
1531 out_ro:
1532         if (available_consumed) {
1533                 spin_lock(&ubi->volumes_lock);
1534                 ubi->avail_pebs += 1;
1535                 spin_unlock(&ubi->volumes_lock);
1536         }
1537         ubi_ro_mode(ubi);
1538         return err;
1539 }
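
/*
 * The failure handling in erase_worker() boils down to a small decision
 * procedure, sketched below for illustration only (the enum and helper are
 * hypothetical, not part of the driver): transient errors are re-tried,
 * %-EIO marks the PEB bad, anything else forces R/O mode.
 */
#if 0
enum erase_failure_action {
	ERASE_RETRY,	/* transient error: re-schedule the erasure */
	ERASE_MARK_BAD,	/* -EIO: the PEB went bad, try to mark it */
	ERASE_GO_RO,	/* unknown error: switch the device to R/O mode */
};

static enum erase_failure_action classify_erase_failure(int err)
{
	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY)
		return ERASE_RETRY;
	if (err == -EIO)
		return ERASE_MARK_BAD;
	return ERASE_GO_RO;
}
#endif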
1540
1541 /**
1542  * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1543  * @ubi: UBI device description object
1544  * @vol_id: the volume ID that last used this PEB
1545  * @lnum: the last used logical eraseblock number for the PEB
1546  * @pnum: physical eraseblock to return
1547  * @torture: if this physical eraseblock has to be tortured
1548  *
1549  * This function is called to return physical eraseblock @pnum to the pool of
1550  * free physical eraseblocks. The @torture flag has to be set if an I/O error
1551  * occurred on this @pnum and it has to be tortured. This function returns zero
1552  * in case of success, and a negative error code in case of failure.
1553  */
1554 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
1555                    int pnum, int torture)
1556 {
1557         int err;
1558         struct ubi_wl_entry *e;
1559
1560         dbg_wl("PEB %d", pnum);
1561         ubi_assert(pnum >= 0);
1562         ubi_assert(pnum < ubi->peb_count);
1563
1564 retry:
1565         spin_lock(&ubi->wl_lock);
1566         e = ubi->lookuptbl[pnum];
1567         if (e == ubi->move_from) {
1568                 /*
1569                  * User is putting the physical eraseblock which was selected to
1570                  * be moved. It will be scheduled for erasure in the
1571                  * wear-leveling worker.
1572                  */
1573                 dbg_wl("PEB %d is being moved, wait", pnum);
1574                 spin_unlock(&ubi->wl_lock);
1575
1576                 /* Wait for the WL worker by taking the @ubi->move_mutex */
1577                 mutex_lock(&ubi->move_mutex);
1578                 mutex_unlock(&ubi->move_mutex);
1579                 goto retry;
1580         } else if (e == ubi->move_to) {
1581                 /*
1582                  * User is putting the physical eraseblock which was selected
1583                  * as the target of a data move. This may happen if the EBA
1584                  * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1585                  * but the WL sub-system has not yet put the PEB to the "used"
1586                  * tree, although it is about to do so. We just set a flag which
1587                  * will tell the WL worker that the PEB is not needed anymore
1588                  * and should be scheduled for erasure.
1589                  */
1590                 dbg_wl("PEB %d is the target of data moving", pnum);
1591                 ubi_assert(!ubi->move_to_put);
1592                 ubi->move_to_put = 1;
1593                 spin_unlock(&ubi->wl_lock);
1594                 return 0;
1595         } else {
1596                 if (in_wl_tree(e, &ubi->used)) {
1597                         self_check_in_wl_tree(ubi, e, &ubi->used);
1598                         rb_erase(&e->u.rb, &ubi->used);
1599                 } else if (in_wl_tree(e, &ubi->scrub)) {
1600                         self_check_in_wl_tree(ubi, e, &ubi->scrub);
1601                         rb_erase(&e->u.rb, &ubi->scrub);
1602                 } else if (in_wl_tree(e, &ubi->erroneous)) {
1603                         self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1604                         rb_erase(&e->u.rb, &ubi->erroneous);
1605                         ubi->erroneous_peb_count -= 1;
1606                         ubi_assert(ubi->erroneous_peb_count >= 0);
1607                         /* Erroneous PEBs should be tortured */
1608                         torture = 1;
1609                 } else {
1610                         err = prot_queue_del(ubi, e->pnum);
1611                         if (err) {
1612                                 ubi_err("PEB %d not found", pnum);
1613                                 ubi_ro_mode(ubi);
1614                                 spin_unlock(&ubi->wl_lock);
1615                                 return err;
1616                         }
1617                 }
1618         }
1619         spin_unlock(&ubi->wl_lock);
1620
1621         err = schedule_erase(ubi, e, vol_id, lnum, torture);
1622         if (err) {
1623                 spin_lock(&ubi->wl_lock);
1624                 wl_tree_add(e, &ubi->used);
1625                 spin_unlock(&ubi->wl_lock);
1626         }
1627
1628         return err;
1629 }
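
/*
 * A minimal (hypothetical) call-site sketch: returning a PEB after a write
 * error and requesting torture testing. The helper name is made up for
 * illustration; real callers live in the EBA sub-system.
 */
#if 0
static int give_back_bad_peb(struct ubi_device *ubi, int vol_id,
			     int lnum, int pnum)
{
	/* An I/O error occurred on @pnum, so ask for torture testing */
	return ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
}
#endif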
1630
1631 /**
1632  * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1633  * @ubi: UBI device description object
1634  * @pnum: the physical eraseblock to schedule
1635  *
1636  * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1637  * needs scrubbing. This function schedules a physical eraseblock for
1638  * scrubbing which is done in background. This function returns zero in case of
1639  * success and a negative error code in case of failure.
1640  */
1641 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1642 {
1643         struct ubi_wl_entry *e;
1644
1645         ubi_msg("schedule PEB %d for scrubbing", pnum);
1646
1647 retry:
1648         spin_lock(&ubi->wl_lock);
1649         e = ubi->lookuptbl[pnum];
1650         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1651                                    in_wl_tree(e, &ubi->erroneous)) {
1652                 spin_unlock(&ubi->wl_lock);
1653                 return 0;
1654         }
1655
1656         if (e == ubi->move_to) {
1657                 /*
1658                  * This physical eraseblock was used to move data to. The data
1659                  * was moved but the PEB was not yet inserted to the proper
1660                  * tree. We should just wait a little and let the WL worker
1661                  * proceed.
1662                  */
1663                 spin_unlock(&ubi->wl_lock);
1664                 dbg_wl("PEB %d is not in a proper tree yet, retry", pnum);
1665                 yield();
1666                 goto retry;
1667         }
1668
1669         if (in_wl_tree(e, &ubi->used)) {
1670                 self_check_in_wl_tree(ubi, e, &ubi->used);
1671                 rb_erase(&e->u.rb, &ubi->used);
1672         } else {
1673                 int err;
1674
1675                 err = prot_queue_del(ubi, e->pnum);
1676                 if (err) {
1677                         ubi_err("PEB %d not found", pnum);
1678                         ubi_ro_mode(ubi);
1679                         spin_unlock(&ubi->wl_lock);
1680                         return err;
1681                 }
1682         }
1683
1684         wl_tree_add(e, &ubi->scrub);
1685         spin_unlock(&ubi->wl_lock);
1686
1687         /*
1688          * Technically scrubbing is the same as wear-leveling, so it is done
1689          * by the WL worker.
1690          */
1691         return ensure_wear_leveling(ubi, 0);
1692 }
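
/*
 * A minimal sketch of how a read path might react to bit-flips, assuming it
 * already has an EC header buffer. The helper is hypothetical, but
 * %UBI_IO_BITFLIPS and 'ubi_io_read_ec_hdr()' are the interfaces used
 * elsewhere in this file.
 */
#if 0
static int read_ec_hdr_and_maybe_scrub(struct ubi_device *ubi, int pnum,
				       struct ubi_ec_hdr *ec_hdr)
{
	int err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);

	if (err == UBI_IO_BITFLIPS)
		/* Data is still correct, but the PEB should be refreshed */
		return ubi_wl_scrub_peb(ubi, pnum);
	return err;
}
#endif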
1693
1694 /**
1695  * ubi_wl_flush - flush all pending works.
1696  * @ubi: UBI device description object
1697  * @vol_id: the volume id to flush for
1698  * @lnum: the logical eraseblock number to flush for
1699  *
1700  * This function executes all pending works for a particular volume id /
1701  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1702  * acts as a wildcard for all of the corresponding volume numbers or logical
1703  * eraseblock numbers. It returns zero in case of success and a negative error
1704  * code in case of failure.
1705  */
1706 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1707 {
1708         int err = 0;
1709         int found = 1;
1710
1711         /*
1712          * Keep picking matching works off the pending queue and executing
1713          * them until no work for this volume ID / LEB pair is left.
1714          */
1715         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1716                vol_id, lnum, ubi->works_count);
1717
1718         while (found) {
1719                 struct ubi_work *wrk;
1720                 found = 0;
1721
1722                 down_read(&ubi->work_sem);
1723                 spin_lock(&ubi->wl_lock);
1724                 list_for_each_entry(wrk, &ubi->works, list) {
1725                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1726                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1727                                 list_del(&wrk->list);
1728                                 ubi->works_count -= 1;
1729                                 ubi_assert(ubi->works_count >= 0);
1730                                 spin_unlock(&ubi->wl_lock);
1731
1732                                 err = wrk->func(ubi, wrk, 0);
1733                                 if (err) {
1734                                         up_read(&ubi->work_sem);
1735                                         return err;
1736                                 }
1737
1738                                 spin_lock(&ubi->wl_lock);
1739                                 found = 1;
1740                                 break;
1741                         }
1742                 }
1743                 spin_unlock(&ubi->wl_lock);
1744                 up_read(&ubi->work_sem);
1745         }
1746
1747         /*
1748          * Make sure all the works which have been done in parallel are
1749          * finished.
1750          */
1751         down_write(&ubi->work_sem);
1752         up_write(&ubi->work_sem);
1753
1754         return err;
1755 }
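
/*
 * Example (hypothetical call sites): %UBI_ALL can be passed for either
 * argument, or both, to widen the flush.
 */
#if 0
err = ubi_wl_flush(ubi, vol_id, lnum);	   /* one LEB of one volume */
err = ubi_wl_flush(ubi, vol_id, UBI_ALL);  /* all LEBs of one volume */
err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL); /* every pending work */
#endif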
1756
1757 /**
1758  * tree_destroy - destroy an RB-tree.
1759  * @root: the root of the tree to destroy
1760  */
1761 static void tree_destroy(struct rb_root *root)
1762 {
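        /*
         * Walk the tree iteratively in post-order: descend to a leaf, free
         * it, and detach it by clearing the parent's child pointer, so no
         * recursion and no extra memory are needed.
         */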
1763         struct rb_node *rb;
1764         struct ubi_wl_entry *e;
1765
1766         rb = root->rb_node;
1767         while (rb) {
1768                 if (rb->rb_left)
1769                         rb = rb->rb_left;
1770                 else if (rb->rb_right)
1771                         rb = rb->rb_right;
1772                 else {
1773                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1774
1775                         rb = rb_parent(rb);
1776                         if (rb) {
1777                                 if (rb->rb_left == &e->u.rb)
1778                                         rb->rb_left = NULL;
1779                                 else
1780                                         rb->rb_right = NULL;
1781                         }
1782
1783                         kmem_cache_free(ubi_wl_entry_slab, e);
1784                 }
1785         }
1786 }
1787
1788 /**
1789  * ubi_thread - UBI background thread.
1790  * @u: the UBI device description object pointer
1791  */
1792 int ubi_thread(void *u)
1793 {
1794         int failures = 0;
1795         struct ubi_device *ubi = u;
1796
1797         ubi_msg("background thread \"%s\" started, PID %d",
1798                 ubi->bgt_name, task_pid_nr(current));
1799
1800         set_freezable();
1801         for (;;) {
1802                 int err;
1803
1804                 if (kthread_should_stop())
1805                         break;
1806
1807                 if (try_to_freeze())
1808                         continue;
1809
1810                 spin_lock(&ubi->wl_lock);
1811                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1812                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1813                         set_current_state(TASK_INTERRUPTIBLE);
1814                         spin_unlock(&ubi->wl_lock);
1815                         schedule();
1816                         continue;
1817                 }
1818                 spin_unlock(&ubi->wl_lock);
1819
1820                 err = do_work(ubi);
1821                 if (err) {
1822                         ubi_err("%s: work failed with error code %d",
1823                                 ubi->bgt_name, err);
1824                         if (failures++ > WL_MAX_FAILURES) {
1825                                 /*
1826                                  * Too many failures, disable the thread and
1827                                  * switch to read-only mode.
1828                                  */
1829                                 ubi_msg("%s: %d consecutive failures",
1830                                         ubi->bgt_name, WL_MAX_FAILURES);
1831                                 ubi_ro_mode(ubi);
1832                                 ubi->thread_enabled = 0;
1833                                 continue;
1834                         }
1835                 } else
1836                         failures = 0;
1837
1838                 cond_resched();
1839         }
1840
1841         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1842         return 0;
1843 }
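
/*
 * For illustration only: UBI starts this thread elsewhere, roughly along the
 * lines of the sketch below (the @bgt_thread field name is assumed here, not
 * taken from this file).
 */
#if 0
ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread))
	return PTR_ERR(ubi->bgt_thread);
#endif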
1844
1845 /**
1846  * cancel_pending - cancel all pending works.
1847  * @ubi: UBI device description object
1848  */
1849 static void cancel_pending(struct ubi_device *ubi)
1850 {
1851         while (!list_empty(&ubi->works)) {
1852                 struct ubi_work *wrk;
1853
1854                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1855                 list_del(&wrk->list);
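                /* @cancel is set: the worker must just free its resources */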
1856                 wrk->func(ubi, wrk, 1);
1857                 ubi->works_count -= 1;
1858                 ubi_assert(ubi->works_count >= 0);
1859         }
1860 }
1861
1862 /**
1863  * ubi_wl_init - initialize the WL sub-system using attaching information.
1864  * @ubi: UBI device description object
1865  * @ai: attaching information
1866  *
1867  * This function returns zero in case of success, and a negative error code in
1868  * case of failure.
1869  */
1870 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1871 {
1872         int err, i, reserved_pebs, found_pebs = 0;
1873         struct rb_node *rb1, *rb2;
1874         struct ubi_ainf_volume *av;
1875         struct ubi_ainf_peb *aeb, *tmp;
1876         struct ubi_wl_entry *e;
1877
1878         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1879         spin_lock_init(&ubi->wl_lock);
1880         mutex_init(&ubi->move_mutex);
1881         init_rwsem(&ubi->work_sem);
1882         ubi->max_ec = ai->max_ec;
1883         INIT_LIST_HEAD(&ubi->works);
1884 #ifdef CONFIG_MTD_UBI_FASTMAP
1885         INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
1886 #endif
1887
1888         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1889
1890         err = -ENOMEM;
1891         ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1892         if (!ubi->lookuptbl)
1893                 return err;
1894
1895         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1896                 INIT_LIST_HEAD(&ubi->pq[i]);
1897         ubi->pq_head = 0;
1898
1899         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1900                 cond_resched();
1901
1902                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1903                 if (!e)
1904                         goto out_free;
1905
1906                 e->pnum = aeb->pnum;
1907                 e->ec = aeb->ec;
1908                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1909                 ubi->lookuptbl[e->pnum] = e;
1910                 if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
1911                         kmem_cache_free(ubi_wl_entry_slab, e);
1912                         goto out_free;
1913                 }
1914
1915                 found_pebs++;
1916         }
1917
1918         ubi->free_count = 0;
1919         list_for_each_entry(aeb, &ai->free, u.list) {
1920                 cond_resched();
1921
1922                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1923                 if (!e)
1924                         goto out_free;
1925
1926                 e->pnum = aeb->pnum;
1927                 e->ec = aeb->ec;
1928                 ubi_assert(e->ec >= 0);
1929                 ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
1930
1931                 wl_tree_add(e, &ubi->free);
1932                 ubi->free_count++;
1933
1934                 ubi->lookuptbl[e->pnum] = e;
1935
1936                 found_pebs++;
1937         }
1938
1939         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1940                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1941                         cond_resched();
1942
1943                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1944                         if (!e)
1945                                 goto out_free;
1946
1947                         e->pnum = aeb->pnum;
1948                         e->ec = aeb->ec;
1949                         ubi->lookuptbl[e->pnum] = e;
1950
1951                         if (!aeb->scrub) {
1952                                 dbg_wl("add PEB %d EC %d to the used tree",
1953                                        e->pnum, e->ec);
1954                                 wl_tree_add(e, &ubi->used);
1955                         } else {
1956                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1957                                        e->pnum, e->ec);
1958                                 wl_tree_add(e, &ubi->scrub);
1959                         }
1960
1961                         found_pebs++;
1962                 }
1963         }
1964
1965         dbg_wl("found %i PEBs", found_pebs);
1966
1967         if (ubi->fm)
1968                 ubi_assert(ubi->good_peb_count ==
1969                            found_pebs + ubi->fm->used_blocks);
1970         else
1971                 ubi_assert(ubi->good_peb_count == found_pebs);
1972
1973         reserved_pebs = WL_RESERVED_PEBS;
1974 #ifdef CONFIG_MTD_UBI_FASTMAP
1975         /* Reserve enough PEBs to store two fastmaps. */
1976         reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
1977 #endif
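        /*
         * For example, with a 256 KiB fastmap and 128 KiB eraseblocks this
         * adds (256 / 128) * 2 = 4 PEBs on top of %WL_RESERVED_PEBS.
         */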
1978
1979         if (ubi->avail_pebs < reserved_pebs) {
1980                 ubi_err("no enough physical eraseblocks (%d, need %d)",
1981                         ubi->avail_pebs, reserved_pebs);
1982                 if (ubi->corr_peb_count)
1983                         ubi_err("%d PEBs are corrupted and not used",
1984                                 ubi->corr_peb_count);
1985                 goto out_free;
1986         }
1987         ubi->avail_pebs -= reserved_pebs;
1988         ubi->rsvd_pebs += reserved_pebs;
1989
1990         /* Schedule wear-leveling if needed */
1991         err = ensure_wear_leveling(ubi, 0);
1992         if (err)
1993                 goto out_free;
1994
1995         return 0;
1996
1997 out_free:
1998         cancel_pending(ubi);
1999         tree_destroy(&ubi->used);
2000         tree_destroy(&ubi->free);
2001         tree_destroy(&ubi->scrub);
2002         kfree(ubi->lookuptbl);
2003         return err;
2004 }
2005
2006 /**
2007  * protection_queue_destroy - destroy the protection queue.
2008  * @ubi: UBI device description object
2009  */
2010 static void protection_queue_destroy(struct ubi_device *ubi)
2011 {
2012         int i;
2013         struct ubi_wl_entry *e, *tmp;
2014
2015         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
2016                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
2017                         list_del(&e->u.list);
2018                         kmem_cache_free(ubi_wl_entry_slab, e);
2019                 }
2020         }
2021 }
2022
2023 /**
2024  * ubi_wl_close - close the wear-leveling sub-system.
2025  * @ubi: UBI device description object
2026  */
2027 void ubi_wl_close(struct ubi_device *ubi)
2028 {
2029         dbg_wl("close the WL sub-system");
2030         cancel_pending(ubi);
2031         protection_queue_destroy(ubi);
2032         tree_destroy(&ubi->used);
2033         tree_destroy(&ubi->erroneous);
2034         tree_destroy(&ubi->free);
2035         tree_destroy(&ubi->scrub);
2036         kfree(ubi->lookuptbl);
2037 }
2038
2039 /**
2040  * self_check_ec - make sure that the erase counter of a PEB is correct.
2041  * @ubi: UBI device description object
2042  * @pnum: the physical eraseblock number to check
2043  * @ec: the erase counter to check
2044  *
2045  * This function returns zero if the erase counter of physical eraseblock @pnum
2046  * is consistent with @ec, and a non-zero value if not or if an error
2047  * occurred.
2048  */
2049 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2050 {
2051         int err;
2052         long long read_ec;
2053         struct ubi_ec_hdr *ec_hdr;
2054
2055         if (!ubi_dbg_chk_gen(ubi))
2056                 return 0;
2057
2058         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2059         if (!ec_hdr)
2060                 return -ENOMEM;
2061
2062         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
2063         if (err && err != UBI_IO_BITFLIPS) {
2064                 /* The EC header may legitimately be absent, this is not an error */
2065                 err = 0;
2066                 goto out_free;
2067         }
2068
2069         read_ec = be64_to_cpu(ec_hdr->ec);
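        /* The on-flash counter may be at most one ahead of @ec; tolerate that */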
2070         if (ec != read_ec && read_ec - ec > 1) {
2071                 ubi_err("self-check failed for PEB %d", pnum);
2072                 ubi_err("read EC is %lld, should be %d", read_ec, ec);
2073                 dump_stack();
2074                 err = 1;
2075         } else
2076                 err = 0;
2077
2078 out_free:
2079         kfree(ec_hdr);
2080         return err;
2081 }
2082
2083 /**
2084  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
2085  * @ubi: UBI device description object
2086  * @e: the wear-leveling entry to check
2087  * @root: the root of the tree
2088  *
2089  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2090  * is not.
2091  */
2092 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2093                                  struct ubi_wl_entry *e, struct rb_root *root)
2094 {
2095         if (!ubi_dbg_chk_gen(ubi))
2096                 return 0;
2097
2098         if (in_wl_tree(e, root))
2099                 return 0;
2100
2101         ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ",
2102                 e->pnum, e->ec, root);
2103         dump_stack();
2104         return -EINVAL;
2105 }
2106
2107 /**
2108  * self_check_in_pq - check if a wear-leveling entry is in the protection
2109  * queue.
2110  * @ubi: UBI device description object
2111  * @e: the wear-leveling entry to check
2112  *
2113  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2114  */
2115 static int self_check_in_pq(const struct ubi_device *ubi,
2116                             struct ubi_wl_entry *e)
2117 {
2118         struct ubi_wl_entry *p;
2119         int i;
2120
2121         if (!ubi_dbg_chk_gen(ubi))
2122                 return 0;
2123
2124         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
2125                 list_for_each_entry(p, &ubi->pq[i], u.list)
2126                         if (p == e)
2127                                 return 0;
2128
2129         ubi_err("self-check failed for PEB %d, EC %d, Protect queue",
2130                 e->pnum, e->ec);
2131         dump_stack();
2132         return -EINVAL;
2133 }