drivers/mtd/ubi/wl.c
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counters to free physical eraseblocks
 * with high erase counters.
 *
 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
 * an "optimal" physical eraseblock. For example, when it is known that the
 * physical eraseblock will be "put" soon because it contains short-term data,
 * the WL sub-system may pick a free physical eraseblock with a low erase
 * counter, and so forth.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, for the WL sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in a set of different RB-trees: @wl->used,
 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In the future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do so because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with a low erase counter, and we need to pick a
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-works of the WL sub-system.
 *
 * Note: the stuff with protection trees looks too complex and is difficult to
 * understand. Should be fixed.
 */

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * For how many erase cycles short term, unknown, and long term physical
 * eraseblocks are protected.
 */
#define ST_PROTECTION 16
#define U_PROTECTION 10
#define LT_PROTECTION 4

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counters to free physical eraseblocks with high
 * erase counters.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with an erase counter greater than the lowest
 * erase counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
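
/*
 * Worked example (illustrative; assumes the Kconfig default of 4096 for
 * CONFIG_MTD_UBI_WL_THRESHOLD): WL_FREE_MAX_DIFF is then 8192, so if the
 * least worn free PEB has erase counter 1000, 'find_wl_entry()' will never
 * hand out a free PEB with an erase counter above 1000 + 8192, no matter
 * how worn the rest of the @wl->free tree is.
 */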

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

/**
 * struct ubi_wl_prot_entry - PEB protection entry.
 * @rb_pnum: link in the @wl->prot.pnum RB-tree
 * @rb_aec: link in the @wl->prot.aec RB-tree
 * @abs_ec: the absolute erase counter value when the protection ends
 * @e: the wear-leveling entry of the physical eraseblock under protection
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is one more tree in between where this physical
 * eraseblock is temporarily stored (@wl->prot).
 *
 * All this protection stuff is needed because:
 * o we don't want to move physical eraseblocks just after we have given them
 *   to the user; instead, we first want to let users fill them up with data;
 *
 * o there is a chance that the user will put the physical eraseblock very
 *   soon, so it makes sense not to move it for some time, but wait; this is
 *   especially important in case of "short term" physical eraseblocks.
 *
 * Physical eraseblocks stay protected only for a limited time. But the "time"
 * is measured in erase cycles in this case. This is implemented with the help
 * of the absolute erase counter (@wl->abs_ec). When it reaches a certain
 * value, the physical eraseblocks are moved from the protection trees
 * (@wl->prot.*) to the @wl->used tree.
 *
 * Protected physical eraseblocks are searched by physical eraseblock number
 * (when they are put) and by the absolute erase counter (to check if it is
 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
 * storing the protected physical eraseblocks: @wl->prot.pnum and
 * @wl->prot.aec. They are referred to as the "protection" trees. The
 * first one is indexed by the physical eraseblock number. The second one is
 * indexed by the absolute erase counter. Both trees store
 * &struct ubi_wl_prot_entry objects.
 *
 * Each physical eraseblock has 2 main states: free and used. The former state
 * corresponds to the @wl->free tree. The latter state is split into several
 * sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
 *   @wl->prot.aec trees);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those trees.
 */
struct ubi_wl_prot_entry {
	struct rb_node rb_pnum;
	struct rb_node rb_aec;
	unsigned long long abs_ec;
	struct ubi_wl_entry *e;
};

/**
 * struct ubi_work - UBI work description data structure.
 * @list: a link in the list of pending works
 * @func: worker function
 * @e: physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * The @func pointer points to the worker function. If the @cancel argument is
 * not zero, the worker has to free the resources and exit immediately. The
 * worker has to return zero in case of success and a negative error code in
 * case of failure.
 */
struct ubi_work {
	struct list_head list;
	int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
	/* The below fields are only relevant to erasure works */
	struct ubi_wl_entry *e;
	int torture;
};
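
/*
 * A minimal sketch of a worker honoring the @cancel contract described
 * above (illustrative only - 'erase_worker()' and 'wear_leveling_worker()'
 * below are the real thing):
 *
 *	static int example_worker(struct ubi_device *ubi,
 *				  struct ubi_work *wrk, int cancel)
 *	{
 *		if (cancel) {
 *			kfree(wrk);
 *			return 0;
 *		}
 *		... do the actual work, free @wrk ...
 *		return 0;
 *	}
 */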

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
#define paranoid_check_in_wl_tree(e, root)
#endif

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->rb, parent, p);
	rb_insert_color(&e->rb, root);
}
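
/*
 * Example (illustrative): inserting entries with (EC, pnum) pairs
 * (10, 3), (10, 7) and (12, 1) yields the in-order sequence
 * (10, 3), (10, 7), (12, 1) - entries are ordered by erase counter first,
 * and the physical eraseblock number only breaks ties. This is what makes
 * 'rb_first()' on these trees return the least worn entry.
 */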

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing works at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes it in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err("work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
	int err;

	spin_lock(&ubi->wl_lock);
	while (!ubi->free.rb_node) {
		spin_unlock(&ubi->wl_lock);

		dbg_wl("do one work synchronously");
		err = do_work(ubi);
		if (err)
			return err;

		spin_lock(&ubi->wl_lock);
	}
	spin_unlock(&ubi->wl_lock);

	return 0;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * prot_tree_add - add physical eraseblock to protection trees.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 * @pe: protection entry object to use
 * @abs_ec: absolute erase counter value when this physical eraseblock has
 * to be removed from the protection trees.
 *
 * @ubi->wl_lock has to be locked.
 */
static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  struct ubi_wl_prot_entry *pe, int abs_ec)
{
	struct rb_node **p, *parent = NULL;
	struct ubi_wl_prot_entry *pe1;

	pe->e = e;
	pe->abs_ec = ubi->abs_ec + abs_ec;

	p = &ubi->prot.pnum.rb_node;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);

		if (e->pnum < pe1->e->pnum)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_pnum, parent, p);
	rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);

	p = &ubi->prot.aec.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec < pe1->abs_ec)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&pe->rb_aec, parent, p);
	rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @root: the RB-tree where to look for
 * @max: highest possible erase counter
 *
 * This function looks for a wear-leveling entry with an erase counter closest
 * to @max and less than @max.
 */
static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
	max += e->ec;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
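
/*
 * Worked example (illustrative): note that @max is relative - it is added
 * to the lowest erase counter in @root. So if the tree holds entries with
 * erase counters 10, 50 and 400 and @max is 100, the effective bound is
 * 10 + 100 = 110, and the entry with EC 50 is returned: the most worn
 * entry still below the bound.
 */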

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 * @dtype: type of data which will be stored in this physical eraseblock
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure. Might sleep.
 */
int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
{
	int err, protect, medium_ec;
	struct ubi_wl_entry *e, *first, *last;
	struct ubi_wl_prot_entry *pe;

	ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
		   dtype == UBI_UNKNOWN);

	pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
	if (!pe)
		return -ENOMEM;

retry:
	spin_lock(&ubi->wl_lock);
	if (!ubi->free.rb_node) {
		if (ubi->works_count == 0) {
			ubi_assert(list_empty(&ubi->works));
			ubi_err("no free eraseblocks");
			spin_unlock(&ubi->wl_lock);
			kfree(pe);
			return -ENOSPC;
		}
		spin_unlock(&ubi->wl_lock);

		err = produce_free_peb(ubi);
		if (err < 0) {
			kfree(pe);
			return err;
		}
		goto retry;
	}

	switch (dtype) {
	case UBI_LONGTERM:
		/*
		 * For long term data we pick a physical eraseblock with a
		 * high erase counter. But the highest erase counter we can
		 * pick is bounded by the lowest erase counter plus
		 * %WL_FREE_MAX_DIFF.
		 */
		e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		protect = LT_PROTECTION;
		break;
	case UBI_UNKNOWN:
		/*
		 * For unknown data we pick a physical eraseblock with a
		 * medium erase counter. But by no means can we pick a
		 * physical eraseblock with an erase counter greater than or
		 * equal to the lowest erase counter plus %WL_FREE_MAX_DIFF.
		 */
		first = rb_entry(rb_first(&ubi->free),
				 struct ubi_wl_entry, rb);
		last = rb_entry(rb_last(&ubi->free),
				struct ubi_wl_entry, rb);

		if (last->ec - first->ec < WL_FREE_MAX_DIFF)
			e = rb_entry(ubi->free.rb_node,
				     struct ubi_wl_entry, rb);
		else {
			medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
			e = find_wl_entry(&ubi->free, medium_ec);
		}
		protect = U_PROTECTION;
		break;
	case UBI_SHORTTERM:
		/*
		 * For short term data we pick a physical eraseblock with the
		 * lowest erase counter as we expect it will be erased soon.
		 */
		e = rb_entry(rb_first(&ubi->free),
			     struct ubi_wl_entry, rb);
		protect = ST_PROTECTION;
		break;
	default:
		protect = 0;
		e = NULL;
		BUG();
	}

	/*
	 * Move the physical eraseblock to the protection trees where it will
	 * be protected from being moved for some time.
	 */
	paranoid_check_in_wl_tree(e, &ubi->free);
	rb_erase(&e->rb, &ubi->free);
	prot_tree_add(ubi, e, pe, protect);

	dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
	spin_unlock(&ubi->wl_lock);

	return e->pnum;
}
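
/*
 * Usage sketch (illustrative only, error handling trimmed): a caller such
 * as the EBA sub-system obtains a PEB for new data roughly like
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_SHORTTERM);
 *	if (pnum < 0)
 *		return pnum;
 *
 * and then writes a VID header and data to @pnum. The returned PEB already
 * carries a valid EC header and is otherwise filled with %0xFF bytes, as
 * described at the top of this file.
 */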

/**
 * prot_tree_del - remove a physical eraseblock from the protection trees
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function removes PEB @pnum from the protection trees and returns zero
 * in case of success and %-ENODEV if the PEB was not found in the protection
 * trees.
 */
static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
	struct rb_node *p;
	struct ubi_wl_prot_entry *pe = NULL;

	p = ubi->prot.pnum.rb_node;
	while (p) {

		pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);

		if (pnum == pe->e->pnum)
			goto found;

		if (pnum < pe->e->pnum)
			p = p->rb_left;
		else
			p = p->rb_right;
	}

	return -ENODEV;

found:
	ubi_assert(pe->e->pnum == pnum);
	rb_erase(&pe->rb_aec, &ubi->prot.aec);
	rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
	kfree(pe);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = paranoid_check_ec(ubi, e->pnum, e->ec);
	if (err > 0)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err("erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}
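
/*
 * For example (illustrative numbers): 'sync_erase()' adds the value
 * returned by 'ubi_io_sync_erase()' to the old erase counter, so a PEB
 * which had EC 41 and was erased once gets EC 42 written back into its EC
 * header; torture testing may erase the PEB several times and bump the
 * counter by more than one.
 */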

/**
 * check_protection_over - check if it is time to stop protecting some PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation, when the absolute erase
 * counter is incremented, to check if some physical eraseblocks do not have
 * to be protected any longer. These physical eraseblocks are moved from the
 * protection trees to the used tree.
 */
static void check_protection_over(struct ubi_device *ubi)
{
	struct ubi_wl_prot_entry *pe;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
	while (1) {
		spin_lock(&ubi->wl_lock);
		if (!ubi->prot.aec.rb_node) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		pe = rb_entry(rb_first(&ubi->prot.aec),
			      struct ubi_wl_prot_entry, rb_aec);

		if (pe->abs_ec > ubi->abs_ec) {
			spin_unlock(&ubi->wl_lock);
			break;
		}

		dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
		       pe->e->pnum, ubi->abs_ec, pe->abs_ec);
		rb_erase(&pe->rb_aec, &ubi->prot.aec);
		rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
		wl_tree_add(pe->e, &ubi->used);
		spin_unlock(&ubi->wl_lock);

		kfree(pe);
		cond_resched();
	}
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function enqueues a work defined by @wrk to the tail of the pending
 * works list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled)
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int torture)
{
	struct ubi_work *wl_wrk;

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->torture = torture;

	schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int cancel)
{
	int err, put = 0, scrubbing = 0, protect = 0;
	struct ubi_wl_prot_entry *uninitialized_var(pe);
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_hdr *vid_hdr;

	kfree(wrk);

	if (cancel)
		return 0;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
	if (!vid_hdr)
		return -ENOMEM;

	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

	if (!ubi->scrub.rb_node) {
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ much enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);
			goto out_cancel;
		}
		paranoid_check_in_wl_tree(e1, &ubi->used);
		rb_erase(&e1->rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
		paranoid_check_in_wl_tree(e1, &ubi->scrub);
		rb_erase(&e1->rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	paranoid_check_in_wl_tree(e2, &ubi->free);
	rb_erase(&e2->rb, &ubi->free);
	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		if (err == UBI_IO_PEB_FREE) {
			/*
			 * We are trying to move a PEB without a VID header.
			 * UBI always writes VID headers shortly after giving
			 * out a PEB, so we have a situation when it did not
			 * have a chance to write it down because it was
			 * preempted. Just re-schedule the work, so that next
			 * time it will likely have the VID header in place.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			goto out_not_moved;
		}

		ubi_err("error %d while reading VID header from PEB %d",
			err, e1->pnum);
		if (err > 0)
			err = -EIO;
		goto out_error;
	}

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
	if (err) {

		if (err < 0)
			goto out_error;
		if (err == 1)
			goto out_not_moved;

		/*
		 * For some reason the LEB was not moved - it might be because
		 * the volume is being deleted. We should prevent this PEB from
		 * being selected for wear-leveling movement for some "time",
		 * so put it to the protection tree.
		 */

		dbg_wl("cancelled moving PEB %d", e1->pnum);
		pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
		if (!pe) {
			err = -ENOMEM;
			goto out_error;
		}

		protect = 1;
	}

	ubi_free_vid_hdr(ubi, vid_hdr);
	if (scrubbing && !protect)
		ubi_msg("scrubbed PEB %d, data moved to PEB %d",
			e1->pnum, e2->pnum);

	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_tree_add(ubi, e1, pe, protect);
	if (!ubi->move_to_put)
		wl_tree_add(e2, &ubi->used);
	else
		put = 1;
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	if (put) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
		err = schedule_erase(ubi, e2, 0);
		if (err)
			goto out_error;
	}

	if (!protect) {
		err = schedule_erase(ubi, e1, 0);
		if (err)
			goto out_error;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	return 0;

	/*
	 * For some reason the LEB was not moved - it might be an error, it
	 * might be something else. @e1 was not changed, so return it back.
	 * @e2 might have been changed, schedule it for erasure.
	 */
out_not_moved:
	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else
		wl_tree_add(e1, &ubi->used);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e2, 0);
	if (err)
		goto out_error;

	mutex_unlock(&ubi->move_mutex);
	return 0;

out_error:
	ubi_err("error %d while moving PEB %d to PEB %d",
		err, e1->pnum, e2->pnum);

	ubi_free_vid_hdr(ubi, vid_hdr);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	kmem_cache_free(ubi_wl_entry_slab, e1);
	kmem_cache_free(ubi_wl_entry_slab, e2);
	ubi_ro_mode(ubi);

	mutex_unlock(&ubi->move_mutex);
	return err;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	ubi_free_vid_hdr(ubi, vid_hdr);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
		e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @cancel: non-zero if the worker has to free memory and exit
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int cancel)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum, err, need;

	if (cancel) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		kmem_cache_free(ubi_wl_entry_slab, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d", pnum, e->ec);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		ubi->abs_ec += 1;
		wl_tree_add(e, &ubi->free);
		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		check_protection_over(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi);
		return err;
	}

	ubi_err("failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);
	kmem_cache_free(ubi_wl_entry_slab, e);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, 0);
		if (err1) {
			err = err1;
			goto out_ro;
		}
		return err;
	} else if (err != -EIO) {
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to RO mode.
		 */
		goto out_ro;
	}

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err("bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
	if (need > 0) {
		need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
		ubi->avail_pebs -= need;
		ubi->rsvd_pebs += need;
		ubi->beb_rsvd_pebs += need;
		if (need > 0)
			ubi_msg("reserve more %d PEBs", need);
	}

	if (ubi->beb_rsvd_pebs == 0) {
		spin_unlock(&ubi->volumes_lock);
		ubi_err("no reserved physical eraseblocks");
		goto out_ro;
	}

	spin_unlock(&ubi->volumes_lock);
	ubi_msg("mark PEB %d as bad", pnum);

	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	ubi->beb_rsvd_pebs -= 1;
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (ubi->beb_rsvd_pebs == 0)
		ubi_warn("last PEB from the reserved pool was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	ubi_ro_mode(ubi);
	return err;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * to be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			paranoid_check_in_wl_tree(e, &ubi->used);
			rb_erase(&e->rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			paranoid_check_in_wl_tree(e, &ubi->scrub);
			rb_erase(&e->rb, &ubi->scrub);
		} else {
			err = prot_tree_del(ubi, e->pnum);
			if (err) {
				ubi_err("PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, torture);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	return err;
}
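
/*
 * Illustrative get/put pairing (not real driver code): a PEB obtained with
 * 'ubi_wl_get_peb()' is returned with 'ubi_wl_put_peb()' once its contents
 * are obsolete:
 *
 *	pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
 *	...
 *	err = ubi_wl_put_peb(ubi, pnum, 0);
 *
 * with @torture set to 1 instead of 0 if an I/O error was seen on this
 * PEB, so that the erase worker checks it thoroughly.
 */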

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	dbg_msg("schedule PEB %d for scrubbing", pnum);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	if (e == ubi->move_to) {
		/*
		 * This physical eraseblock was used to move data to. The data
		 * was moved but the PEB was not yet inserted to the proper
		 * tree. We should just wait a little and let the WL worker
		 * proceed.
		 */
		spin_unlock(&ubi->wl_lock);
		dbg_wl("the PEB %d is not in proper tree, retry", pnum);
		yield();
		goto retry;
	}

	if (in_wl_tree(e, &ubi->used)) {
		paranoid_check_in_wl_tree(e, &ubi->used);
		rb_erase(&e->rb, &ubi->used);
	} else {
		int err;

		err = prot_tree_del(ubi, e->pnum);
		if (err) {
			ubi_err("PEB %d not found", pnum);
			ubi_ro_mode(ubi);
			spin_unlock(&ubi->wl_lock);
			return err;
		}
	}

	wl_tree_add(e, &ubi->scrub);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Technically scrubbing is the same as wear-leveling, so it is done
	 * by the WL worker.
	 */
	return ensure_wear_leveling(ubi);
}

/**
 * ubi_wl_flush - flush all pending works.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_wl_flush(struct ubi_device *ubi)
{
	int err;

	/*
	 * Erase while the pending works queue is not empty, but not more than
	 * the number of currently pending works.
	 */
	dbg_wl("flush (%d pending works)", ubi->works_count);
	while (ubi->works_count) {
		err = do_work(ubi);
		if (err)
			return err;
	}

	/*
	 * Make sure all the works which have been done in parallel are
	 * finished.
	 */
	down_write(&ubi->work_sem);
	up_write(&ubi->work_sem);

	/*
	 * And in case the last one was the WL worker and it cancelled the LEB
	 * movement, flush again.
	 */
	while (ubi->works_count) {
		dbg_wl("flush more (%d pending works)", ubi->works_count);
		err = do_work(ubi);
		if (err)
			return err;
	}

	return 0;
}

/**
 * tree_destroy - destroy an RB-tree.
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct rb_root *root)
{
	struct rb_node *rb;
	struct ubi_wl_entry *e;

	rb = root->rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			e = rb_entry(rb, struct ubi_wl_entry, rb);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &e->rb)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, e);
		}
	}
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
	int failures = 0;
	struct ubi_device *ubi = u;

	ubi_msg("background thread \"%s\" started, PID %d",
		ubi->bgt_name, task_pid_nr(current));

	set_freezable();
	for (;;) {
		int err;

		if (kthread_should_stop())
			break;

		if (try_to_freeze())
			continue;

		spin_lock(&ubi->wl_lock);
		if (list_empty(&ubi->works) || ubi->ro_mode ||
		    !ubi->thread_enabled) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&ubi->wl_lock);
			schedule();
			continue;
		}
		spin_unlock(&ubi->wl_lock);

		err = do_work(ubi);
		if (err) {
			ubi_err("%s: work failed with error code %d",
				ubi->bgt_name, err);
			if (failures++ > WL_MAX_FAILURES) {
				/*
				 * Too many failures, disable the thread and
				 * switch to read-only mode.
				 */
				ubi_msg("%s: %d consecutive failures",
					ubi->bgt_name, WL_MAX_FAILURES);
				ubi_ro_mode(ubi);
				break;
			}
		} else
			failures = 0;

		cond_resched();
	}

	dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
	return 0;
}

/**
 * cancel_pending - cancel all pending works.
 * @ubi: UBI device description object
 */
static void cancel_pending(struct ubi_device *ubi)
{
	while (!list_empty(&ubi->works)) {
		struct ubi_work *wrk;

		wrk = list_entry(ubi->works.next, struct ubi_work, list);
		list_del(&wrk->list);
		wrk->func(ubi, wrk, 1);
		ubi->works_count -= 1;
		ubi_assert(ubi->works_count >= 0);
	}
}

/**
 * ubi_wl_init_scan - initialize the WL sub-system using scanning information.
 * @ubi: UBI device description object
 * @si: scanning information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
{
	int err;
	struct rb_node *rb1, *rb2;
	struct ubi_scan_volume *sv;
	struct ubi_scan_leb *seb, *tmp;
	struct ubi_wl_entry *e;

	ubi->used = ubi->free = ubi->scrub = RB_ROOT;
	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
	spin_lock_init(&ubi->wl_lock);
	mutex_init(&ubi->move_mutex);
	init_rwsem(&ubi->work_sem);
	ubi->max_ec = si->max_ec;
	INIT_LIST_HEAD(&ubi->works);

	sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

	err = -ENOMEM;
	ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
	if (!ubi->lookuptbl)
		return err;

	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	list_for_each_entry(seb, &si->free, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi_assert(e->ec >= 0);
		wl_tree_add(e, &ubi->free);
		ubi->lookuptbl[e->pnum] = e;
	}

	list_for_each_entry(seb, &si->corr, u.list) {
		cond_resched();

		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
		if (!e)
			goto out_free;

		e->pnum = seb->pnum;
		e->ec = seb->ec;
		ubi->lookuptbl[e->pnum] = e;
		if (schedule_erase(ubi, e, 0)) {
			kmem_cache_free(ubi_wl_entry_slab, e);
			goto out_free;
		}
	}

	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
			cond_resched();

			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
			if (!e)
				goto out_free;

			e->pnum = seb->pnum;
			e->ec = seb->ec;
			ubi->lookuptbl[e->pnum] = e;
			if (!seb->scrub) {
				dbg_wl("add PEB %d EC %d to the used tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->used);
			} else {
				dbg_wl("add PEB %d EC %d to the scrub tree",
				       e->pnum, e->ec);
				wl_tree_add(e, &ubi->scrub);
			}
		}
	}

	if (ubi->avail_pebs < WL_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, WL_RESERVED_PEBS);
		goto out_free;
	}
	ubi->avail_pebs -= WL_RESERVED_PEBS;
	ubi->rsvd_pebs += WL_RESERVED_PEBS;

	/* Schedule wear-leveling if needed */
	err = ensure_wear_leveling(ubi);
	if (err)
		goto out_free;

	return 0;

out_free:
	cancel_pending(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
	return err;
}

/**
 * protection_trees_destroy - destroy the protection RB-trees.
 * @ubi: UBI device description object
 */
static void protection_trees_destroy(struct ubi_device *ubi)
{
	struct rb_node *rb;
	struct ubi_wl_prot_entry *pe;

	rb = ubi->prot.aec.rb_node;
	while (rb) {
		if (rb->rb_left)
			rb = rb->rb_left;
		else if (rb->rb_right)
			rb = rb->rb_right;
		else {
			pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);

			rb = rb_parent(rb);
			if (rb) {
				if (rb->rb_left == &pe->rb_aec)
					rb->rb_left = NULL;
				else
					rb->rb_right = NULL;
			}

			kmem_cache_free(ubi_wl_entry_slab, pe->e);
			kfree(pe);
		}
	}
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
	dbg_wl("close the WL sub-system");
	cancel_pending(ubi);
	protection_trees_destroy(ubi);
	tree_destroy(&ubi->used);
	tree_destroy(&ubi->free);
	tree_destroy(&ubi->scrub);
	kfree(ubi->lookuptbl);
}

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID

/**
 * paranoid_check_ec - make sure the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, %1 if not, and a negative error code if an error
 * occurred.
 */
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
	int err;
	long long read_ec;
	struct ubi_ec_hdr *ec_hdr;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		/* The header does not have to exist */
		err = 0;
		goto out_free;
	}

	read_ec = be64_to_cpu(ec_hdr->ec);
	if (ec != read_ec) {
		ubi_err("paranoid check failed for PEB %d", pnum);
		ubi_err("read EC is %lld, should be %d", read_ec, ec);
		ubi_dbg_dump_stack();
		err = 1;
	} else
		err = 0;

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * paranoid_check_in_wl_tree - make sure a wear-leveling entry is in a WL
 * RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %1 if it
 * is not.
 */
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
				     struct rb_root *root)
{
	if (in_wl_tree(e, root))
		return 0;

	ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p",
		e->pnum, e->ec, root);
	ubi_dbg_dump_stack();
	return 1;
}

#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */