[MTD] [NAND] make s3c2410 indicate an error for multi-bit read errors
[deliverable/linux.git] / drivers / mtd / ubi / eba.c
CommitLineData
801c135c
AB
1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * The UBI Eraseblock Association (EBA) unit.
23 *
24 * This unit is responsible for I/O to/from logical eraseblock.
25 *
26 * Although in this implementation the EBA table is fully kept and managed in
27 * RAM, which assumes poor scalability, it might be (partially) maintained on
28 * flash in future implementations.
29 *
30 * The EBA unit implements per-logical eraseblock locking. Before accessing a
31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
3a8d4642 34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
801c135c
AB
35 * (@vol_id, @lnum) pairs.
36 *
37 * EBA also maintains the global sequence counter which is incremented each
38 * time a logical eraseblock is mapped to a physical eraseblock and it is
39 * stored in the volume identifier header. This means that each VID header has
40 * a unique sequence number. The sequence number is only increased and we assume
41 * 64 bits is enough to never overflow.
42 */
43
44#include <linux/slab.h>
45#include <linux/crc32.h>
46#include <linux/err.h>
47#include "ubi.h"
48
e8823bd6
AB
49/* Number of physical eraseblocks reserved for atomic LEB change operation */
50#define EBA_RESERVED_PEBS 1
51
801c135c
AB
52/**
53 * next_sqnum - get next sequence number.
54 * @ubi: UBI device description object
55 *
56 * This function returns next sequence number to use, which is just the current
57 * global sequence counter value. It also increases the global sequence
58 * counter.
59 */
60static unsigned long long next_sqnum(struct ubi_device *ubi)
61{
62 unsigned long long sqnum;
63
64 spin_lock(&ubi->ltree_lock);
65 sqnum = ubi->global_sqnum++;
66 spin_unlock(&ubi->ltree_lock);
67
68 return sqnum;
69}
70
71/**
72 * ubi_get_compat - get compatibility flags of a volume.
73 * @ubi: UBI device description object
74 * @vol_id: volume ID
75 *
76 * This function returns compatibility flags for an internal volume. User
77 * volumes have no compatibility flags, so %0 is returned.
78 */
79static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
80{
81 if (vol_id == UBI_LAYOUT_VOL_ID)
82 return UBI_LAYOUT_VOLUME_COMPAT;
83 return 0;
84}
85
86/**
87 * ltree_lookup - look up the lock tree.
88 * @ubi: UBI device description object
89 * @vol_id: volume ID
90 * @lnum: logical eraseblock number
91 *
3a8d4642 92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
801c135c
AB
93 * object if the logical eraseblock is locked and %NULL if it is not.
94 * @ubi->ltree_lock has to be locked.
95 */
3a8d4642
AB
96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
97 int lnum)
801c135c
AB
98{
99 struct rb_node *p;
100
101 p = ubi->ltree.rb_node;
102 while (p) {
3a8d4642 103 struct ubi_ltree_entry *le;
801c135c 104
3a8d4642 105 le = rb_entry(p, struct ubi_ltree_entry, rb);
801c135c
AB
106
107 if (vol_id < le->vol_id)
108 p = p->rb_left;
109 else if (vol_id > le->vol_id)
110 p = p->rb_right;
111 else {
112 if (lnum < le->lnum)
113 p = p->rb_left;
114 else if (lnum > le->lnum)
115 p = p->rb_right;
116 else
117 return le;
118 }
119 }
120
121 return NULL;
122}
123
124/**
125 * ltree_add_entry - add new entry to the lock tree.
126 * @ubi: UBI device description object
127 * @vol_id: volume ID
128 * @lnum: logical eraseblock number
129 *
130 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
131 * lock tree. If such entry is already there, its usage counter is increased.
132 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
133 * failed.
134 */
3a8d4642
AB
135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
136 int vol_id, int lnum)
801c135c 137{
3a8d4642 138 struct ubi_ltree_entry *le, *le1, *le_free;
801c135c 139
3a8d4642 140 le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
801c135c
AB
141 if (!le)
142 return ERR_PTR(-ENOMEM);
143
144 le->vol_id = vol_id;
145 le->lnum = lnum;
146
147 spin_lock(&ubi->ltree_lock);
148 le1 = ltree_lookup(ubi, vol_id, lnum);
149
150 if (le1) {
151 /*
152 * This logical eraseblock is already locked. The newly
153 * allocated lock entry is not needed.
154 */
155 le_free = le;
156 le = le1;
157 } else {
158 struct rb_node **p, *parent = NULL;
159
160 /*
161 * No lock entry, add the newly allocated one to the
162 * @ubi->ltree RB-tree.
163 */
164 le_free = NULL;
165
166 p = &ubi->ltree.rb_node;
167 while (*p) {
168 parent = *p;
3a8d4642 169 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
801c135c
AB
170
171 if (vol_id < le1->vol_id)
172 p = &(*p)->rb_left;
173 else if (vol_id > le1->vol_id)
174 p = &(*p)->rb_right;
175 else {
176 ubi_assert(lnum != le1->lnum);
177 if (lnum < le1->lnum)
178 p = &(*p)->rb_left;
179 else
180 p = &(*p)->rb_right;
181 }
182 }
183
184 rb_link_node(&le->rb, parent, p);
185 rb_insert_color(&le->rb, &ubi->ltree);
186 }
187 le->users += 1;
188 spin_unlock(&ubi->ltree_lock);
189
190 if (le_free)
3a8d4642 191 kmem_cache_free(ubi_ltree_slab, le_free);
801c135c
AB
192
193 return le;
194}
195
196/**
197 * leb_read_lock - lock logical eraseblock for reading.
198 * @ubi: UBI device description object
199 * @vol_id: volume ID
200 * @lnum: logical eraseblock number
201 *
202 * This function locks a logical eraseblock for reading. Returns zero in case
203 * of success and a negative error code in case of failure.
204 */
205static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
206{
3a8d4642 207 struct ubi_ltree_entry *le;
801c135c
AB
208
209 le = ltree_add_entry(ubi, vol_id, lnum);
210 if (IS_ERR(le))
211 return PTR_ERR(le);
212 down_read(&le->mutex);
213 return 0;
214}
215
216/**
217 * leb_read_unlock - unlock logical eraseblock.
218 * @ubi: UBI device description object
219 * @vol_id: volume ID
220 * @lnum: logical eraseblock number
221 */
222static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
223{
224 int free = 0;
3a8d4642 225 struct ubi_ltree_entry *le;
801c135c
AB
226
227 spin_lock(&ubi->ltree_lock);
228 le = ltree_lookup(ubi, vol_id, lnum);
229 le->users -= 1;
230 ubi_assert(le->users >= 0);
231 if (le->users == 0) {
232 rb_erase(&le->rb, &ubi->ltree);
233 free = 1;
234 }
235 spin_unlock(&ubi->ltree_lock);
236
237 up_read(&le->mutex);
238 if (free)
3a8d4642 239 kmem_cache_free(ubi_ltree_slab, le);
801c135c
AB
240}
241
242/**
243 * leb_write_lock - lock logical eraseblock for writing.
244 * @ubi: UBI device description object
245 * @vol_id: volume ID
246 * @lnum: logical eraseblock number
247 *
248 * This function locks a logical eraseblock for writing. Returns zero in case
249 * of success and a negative error code in case of failure.
250 */
251static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
252{
3a8d4642 253 struct ubi_ltree_entry *le;
801c135c
AB
254
255 le = ltree_add_entry(ubi, vol_id, lnum);
256 if (IS_ERR(le))
257 return PTR_ERR(le);
258 down_write(&le->mutex);
259 return 0;
260}
261
43f9b25a
AB
262/**
263 * leb_write_lock - lock logical eraseblock for writing.
264 * @ubi: UBI device description object
265 * @vol_id: volume ID
266 * @lnum: logical eraseblock number
267 *
268 * This function locks a logical eraseblock for writing if there is no
269 * contention and does nothing if there is contention. Returns %0 in case of
270 * success, %1 in case of contention, and and a negative error code in case of
271 * failure.
272 */
273static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
274{
275 int free;
276 struct ubi_ltree_entry *le;
277
278 le = ltree_add_entry(ubi, vol_id, lnum);
279 if (IS_ERR(le))
280 return PTR_ERR(le);
281 if (down_write_trylock(&le->mutex))
282 return 0;
283
284 /* Contention, cancel */
285 spin_lock(&ubi->ltree_lock);
286 le->users -= 1;
287 ubi_assert(le->users >= 0);
288 if (le->users == 0) {
289 rb_erase(&le->rb, &ubi->ltree);
290 free = 1;
291 } else
292 free = 0;
293 spin_unlock(&ubi->ltree_lock);
294 if (free)
295 kmem_cache_free(ubi_ltree_slab, le);
296
297 return 1;
298}
299
801c135c
AB
300/**
301 * leb_write_unlock - unlock logical eraseblock.
302 * @ubi: UBI device description object
303 * @vol_id: volume ID
304 * @lnum: logical eraseblock number
305 */
306static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
307{
308 int free;
3a8d4642 309 struct ubi_ltree_entry *le;
801c135c
AB
310
311 spin_lock(&ubi->ltree_lock);
312 le = ltree_lookup(ubi, vol_id, lnum);
313 le->users -= 1;
314 ubi_assert(le->users >= 0);
315 if (le->users == 0) {
316 rb_erase(&le->rb, &ubi->ltree);
317 free = 1;
318 } else
319 free = 0;
320 spin_unlock(&ubi->ltree_lock);
321
322 up_write(&le->mutex);
323 if (free)
3a8d4642 324 kmem_cache_free(ubi_ltree_slab, le);
801c135c
AB
325}
326
327/**
328 * ubi_eba_unmap_leb - un-map logical eraseblock.
329 * @ubi: UBI device description object
89b96b69 330 * @vol: volume description object
801c135c
AB
331 * @lnum: logical eraseblock number
332 *
333 * This function un-maps logical eraseblock @lnum and schedules corresponding
334 * physical eraseblock for erasure. Returns zero in case of success and a
335 * negative error code in case of failure.
336 */
89b96b69
AB
337int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
338 int lnum)
801c135c 339{
89b96b69 340 int err, pnum, vol_id = vol->vol_id;
801c135c 341
e73f4459 342 ubi_assert(ubi->ref_count > 0);
d05c77a8
AB
343 ubi_assert(vol->ref_count > 0);
344
801c135c
AB
345 if (ubi->ro_mode)
346 return -EROFS;
347
348 err = leb_write_lock(ubi, vol_id, lnum);
349 if (err)
350 return err;
351
352 pnum = vol->eba_tbl[lnum];
353 if (pnum < 0)
354 /* This logical eraseblock is already unmapped */
355 goto out_unlock;
356
357 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
358
359 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
360 err = ubi_wl_put_peb(ubi, pnum, 0);
361
362out_unlock:
363 leb_write_unlock(ubi, vol_id, lnum);
364 return err;
365}
366
367/**
368 * ubi_eba_read_leb - read data.
369 * @ubi: UBI device description object
89b96b69 370 * @vol: volume description object
801c135c
AB
371 * @lnum: logical eraseblock number
372 * @buf: buffer to store the read data
373 * @offset: offset from where to read
374 * @len: how many bytes to read
375 * @check: data CRC check flag
376 *
377 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
378 * bytes. The @check flag only makes sense for static volumes and forces
379 * eraseblock data CRC checking.
380 *
381 * In case of success this function returns zero. In case of a static volume,
382 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
383 * returned for any volume type if an ECC error was detected by the MTD device
384 * driver. Other negative error cored may be returned in case of other errors.
385 */
89b96b69
AB
386int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
387 void *buf, int offset, int len, int check)
801c135c 388{
89b96b69 389 int err, pnum, scrub = 0, vol_id = vol->vol_id;
801c135c 390 struct ubi_vid_hdr *vid_hdr;
a6343afb 391 uint32_t uninitialized_var(crc);
801c135c 392
e73f4459 393 ubi_assert(ubi->ref_count > 0);
d05c77a8
AB
394 ubi_assert(vol->ref_count > 0);
395
801c135c
AB
396 err = leb_read_lock(ubi, vol_id, lnum);
397 if (err)
398 return err;
399
400 pnum = vol->eba_tbl[lnum];
401 if (pnum < 0) {
402 /*
403 * The logical eraseblock is not mapped, fill the whole buffer
404 * with 0xFF bytes. The exception is static volumes for which
405 * it is an error to read unmapped logical eraseblocks.
406 */
407 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
408 len, offset, vol_id, lnum);
409 leb_read_unlock(ubi, vol_id, lnum);
410 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
411 memset(buf, 0xFF, len);
412 return 0;
413 }
414
415 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
416 len, offset, vol_id, lnum, pnum);
417
418 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
419 check = 0;
420
421retry:
422 if (check) {
33818bbb 423 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
801c135c
AB
424 if (!vid_hdr) {
425 err = -ENOMEM;
426 goto out_unlock;
427 }
428
429 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
430 if (err && err != UBI_IO_BITFLIPS) {
431 if (err > 0) {
432 /*
433 * The header is either absent or corrupted.
434 * The former case means there is a bug -
435 * switch to read-only mode just in case.
436 * The latter case means a real corruption - we
437 * may try to recover data. FIXME: but this is
438 * not implemented.
439 */
440 if (err == UBI_IO_BAD_VID_HDR) {
441 ubi_warn("bad VID header at PEB %d, LEB"
442 "%d:%d", pnum, vol_id, lnum);
443 err = -EBADMSG;
444 } else
445 ubi_ro_mode(ubi);
446 }
447 goto out_free;
448 } else if (err == UBI_IO_BITFLIPS)
449 scrub = 1;
450
3261ebd7
CH
451 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
452 ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
801c135c 453
3261ebd7 454 crc = be32_to_cpu(vid_hdr->data_crc);
801c135c
AB
455 ubi_free_vid_hdr(ubi, vid_hdr);
456 }
457
458 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
459 if (err) {
460 if (err == UBI_IO_BITFLIPS) {
461 scrub = 1;
462 err = 0;
463 } else if (err == -EBADMSG) {
464 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
465 goto out_unlock;
466 scrub = 1;
467 if (!check) {
468 ubi_msg("force data checking");
469 check = 1;
470 goto retry;
471 }
472 } else
473 goto out_unlock;
474 }
475
476 if (check) {
2ab934b8 477 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
801c135c
AB
478 if (crc1 != crc) {
479 ubi_warn("CRC error: calculated %#08x, must be %#08x",
480 crc1, crc);
481 err = -EBADMSG;
482 goto out_unlock;
483 }
484 }
485
486 if (scrub)
487 err = ubi_wl_scrub_peb(ubi, pnum);
488
489 leb_read_unlock(ubi, vol_id, lnum);
490 return err;
491
492out_free:
493 ubi_free_vid_hdr(ubi, vid_hdr);
494out_unlock:
495 leb_read_unlock(ubi, vol_id, lnum);
496 return err;
497}
498
499/**
500 * recover_peb - recover from write failure.
501 * @ubi: UBI device description object
502 * @pnum: the physical eraseblock to recover
503 * @vol_id: volume ID
504 * @lnum: logical eraseblock number
505 * @buf: data which was not written because of the write failure
506 * @offset: offset of the failed write
507 * @len: how many bytes should have been written
508 *
509 * This function is called in case of a write failure and moves all good data
510 * from the potentially bad physical eraseblock to a good physical eraseblock.
511 * This function also writes the data which was not written due to the failure.
512 * Returns new physical eraseblock number in case of success, and a negative
513 * error code in case of failure.
514 */
515static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
516 const void *buf, int offset, int len)
517{
518 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
519 struct ubi_volume *vol = ubi->volumes[idx];
520 struct ubi_vid_hdr *vid_hdr;
801c135c 521
33818bbb 522 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
801c135c
AB
523 if (!vid_hdr) {
524 return -ENOMEM;
525 }
526
e88d6e10
AB
527 mutex_lock(&ubi->buf_mutex);
528
801c135c
AB
529retry:
530 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
531 if (new_pnum < 0) {
e88d6e10 532 mutex_unlock(&ubi->buf_mutex);
801c135c
AB
533 ubi_free_vid_hdr(ubi, vid_hdr);
534 return new_pnum;
535 }
536
537 ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
538
539 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
540 if (err && err != UBI_IO_BITFLIPS) {
541 if (err > 0)
542 err = -EIO;
543 goto out_put;
544 }
545
3261ebd7 546 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
801c135c
AB
547 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
548 if (err)
549 goto write_error;
550
551 data_size = offset + len;
e88d6e10 552 memset(ubi->peb_buf1 + offset, 0xFF, len);
801c135c
AB
553
554 /* Read everything before the area where the write failure happened */
555 if (offset > 0) {
e88d6e10
AB
556 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
557 if (err && err != UBI_IO_BITFLIPS)
801c135c 558 goto out_put;
801c135c
AB
559 }
560
e88d6e10 561 memcpy(ubi->peb_buf1 + offset, buf, len);
801c135c 562
e88d6e10
AB
563 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
564 if (err)
801c135c 565 goto write_error;
801c135c 566
e88d6e10 567 mutex_unlock(&ubi->buf_mutex);
801c135c
AB
568 ubi_free_vid_hdr(ubi, vid_hdr);
569
570 vol->eba_tbl[lnum] = new_pnum;
571 ubi_wl_put_peb(ubi, pnum, 1);
572
573 ubi_msg("data was successfully recovered");
574 return 0;
575
576out_put:
e88d6e10 577 mutex_unlock(&ubi->buf_mutex);
801c135c
AB
578 ubi_wl_put_peb(ubi, new_pnum, 1);
579 ubi_free_vid_hdr(ubi, vid_hdr);
580 return err;
581
582write_error:
583 /*
584 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
585 * get another one.
586 */
587 ubi_warn("failed to write to PEB %d", new_pnum);
588 ubi_wl_put_peb(ubi, new_pnum, 1);
589 if (++tries > UBI_IO_RETRIES) {
e88d6e10 590 mutex_unlock(&ubi->buf_mutex);
801c135c
AB
591 ubi_free_vid_hdr(ubi, vid_hdr);
592 return err;
593 }
594 ubi_msg("try again");
595 goto retry;
596}
597
598/**
599 * ubi_eba_write_leb - write data to dynamic volume.
600 * @ubi: UBI device description object
89b96b69 601 * @vol: volume description object
801c135c
AB
602 * @lnum: logical eraseblock number
603 * @buf: the data to write
604 * @offset: offset within the logical eraseblock where to write
605 * @len: how many bytes to write
606 * @dtype: data type
607 *
608 * This function writes data to logical eraseblock @lnum of a dynamic volume
89b96b69 609 * @vol. Returns zero in case of success and a negative error code in case
801c135c
AB
610 * of failure. In case of error, it is possible that something was still
611 * written to the flash media, but may be some garbage.
612 */
89b96b69 613int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
801c135c
AB
614 const void *buf, int offset, int len, int dtype)
615{
89b96b69 616 int err, pnum, tries = 0, vol_id = vol->vol_id;
801c135c
AB
617 struct ubi_vid_hdr *vid_hdr;
618
e73f4459 619 ubi_assert(ubi->ref_count > 0);
d05c77a8
AB
620 ubi_assert(vol->ref_count > 0);
621
801c135c
AB
622 if (ubi->ro_mode)
623 return -EROFS;
624
625 err = leb_write_lock(ubi, vol_id, lnum);
626 if (err)
627 return err;
628
629 pnum = vol->eba_tbl[lnum];
630 if (pnum >= 0) {
631 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
632 len, offset, vol_id, lnum, pnum);
633
634 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
635 if (err) {
636 ubi_warn("failed to write data to PEB %d", pnum);
637 if (err == -EIO && ubi->bad_allowed)
89b96b69
AB
638 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
639 offset, len);
801c135c
AB
640 if (err)
641 ubi_ro_mode(ubi);
642 }
643 leb_write_unlock(ubi, vol_id, lnum);
644 return err;
645 }
646
647 /*
648 * The logical eraseblock is not mapped. We have to get a free physical
649 * eraseblock and write the volume identifier header there first.
650 */
33818bbb 651 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
801c135c
AB
652 if (!vid_hdr) {
653 leb_write_unlock(ubi, vol_id, lnum);
654 return -ENOMEM;
655 }
656
657 vid_hdr->vol_type = UBI_VID_DYNAMIC;
3261ebd7
CH
658 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
659 vid_hdr->vol_id = cpu_to_be32(vol_id);
660 vid_hdr->lnum = cpu_to_be32(lnum);
801c135c 661 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
3261ebd7 662 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
801c135c
AB
663
664retry:
665 pnum = ubi_wl_get_peb(ubi, dtype);
666 if (pnum < 0) {
667 ubi_free_vid_hdr(ubi, vid_hdr);
668 leb_write_unlock(ubi, vol_id, lnum);
669 return pnum;
670 }
671
672 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
673 len, offset, vol_id, lnum, pnum);
674
675 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
676 if (err) {
677 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
678 vol_id, lnum, pnum);
679 goto write_error;
680 }
681
393852ec
AB
682 if (len) {
683 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
684 if (err) {
685 ubi_warn("failed to write %d bytes at offset %d of "
686 "LEB %d:%d, PEB %d", len, offset, vol_id,
687 lnum, pnum);
688 goto write_error;
689 }
801c135c
AB
690 }
691
692 vol->eba_tbl[lnum] = pnum;
693
694 leb_write_unlock(ubi, vol_id, lnum);
695 ubi_free_vid_hdr(ubi, vid_hdr);
696 return 0;
697
698write_error:
699 if (err != -EIO || !ubi->bad_allowed) {
700 ubi_ro_mode(ubi);
701 leb_write_unlock(ubi, vol_id, lnum);
702 ubi_free_vid_hdr(ubi, vid_hdr);
703 return err;
704 }
705
706 /*
707 * Fortunately, this is the first write operation to this physical
708 * eraseblock, so just put it and request a new one. We assume that if
709 * this physical eraseblock went bad, the erase code will handle that.
710 */
711 err = ubi_wl_put_peb(ubi, pnum, 1);
712 if (err || ++tries > UBI_IO_RETRIES) {
713 ubi_ro_mode(ubi);
714 leb_write_unlock(ubi, vol_id, lnum);
715 ubi_free_vid_hdr(ubi, vid_hdr);
716 return err;
717 }
718
3261ebd7 719 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
801c135c
AB
720 ubi_msg("try another PEB");
721 goto retry;
722}
723
724/**
725 * ubi_eba_write_leb_st - write data to static volume.
726 * @ubi: UBI device description object
89b96b69 727 * @vol: volume description object
801c135c
AB
728 * @lnum: logical eraseblock number
729 * @buf: data to write
730 * @len: how many bytes to write
731 * @dtype: data type
732 * @used_ebs: how many logical eraseblocks will this volume contain
733 *
734 * This function writes data to logical eraseblock @lnum of static volume
89b96b69 735 * @vol. The @used_ebs argument should contain total number of logical
801c135c
AB
736 * eraseblock in this static volume.
737 *
738 * When writing to the last logical eraseblock, the @len argument doesn't have
739 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
740 * to the real data size, although the @buf buffer has to contain the
741 * alignment. In all other cases, @len has to be aligned.
742 *
743 * It is prohibited to write more then once to logical eraseblocks of static
744 * volumes. This function returns zero in case of success and a negative error
745 * code in case of failure.
746 */
89b96b69
AB
747int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
748 int lnum, const void *buf, int len, int dtype,
749 int used_ebs)
801c135c 750{
89b96b69 751 int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
801c135c
AB
752 struct ubi_vid_hdr *vid_hdr;
753 uint32_t crc;
754
e73f4459 755 ubi_assert(ubi->ref_count > 0);
d05c77a8
AB
756 ubi_assert(vol->ref_count > 0);
757
801c135c
AB
758 if (ubi->ro_mode)
759 return -EROFS;
760
761 if (lnum == used_ebs - 1)
762 /* If this is the last LEB @len may be unaligned */
763 len = ALIGN(data_size, ubi->min_io_size);
764 else
765 ubi_assert(len % ubi->min_io_size == 0);
766
33818bbb 767 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
801c135c
AB
768 if (!vid_hdr)
769 return -ENOMEM;
770
771 err = leb_write_lock(ubi, vol_id, lnum);
772 if (err) {
773 ubi_free_vid_hdr(ubi, vid_hdr);
774 return err;
775 }
776
3261ebd7
CH
777 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
778 vid_hdr->vol_id = cpu_to_be32(vol_id);
779 vid_hdr->lnum = cpu_to_be32(lnum);
801c135c 780 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
3261ebd7 781 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
801c135c
AB
782
783 crc = crc32(UBI_CRC32_INIT, buf, data_size);
784 vid_hdr->vol_type = UBI_VID_STATIC;
3261ebd7
CH
785 vid_hdr->data_size = cpu_to_be32(data_size);
786 vid_hdr->used_ebs = cpu_to_be32(used_ebs);
787 vid_hdr->data_crc = cpu_to_be32(crc);
801c135c
AB
788
789retry:
790 pnum = ubi_wl_get_peb(ubi, dtype);
791 if (pnum < 0) {
792 ubi_free_vid_hdr(ubi, vid_hdr);
793 leb_write_unlock(ubi, vol_id, lnum);
794 return pnum;
795 }
796
797 dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
798 len, vol_id, lnum, pnum, used_ebs);
799
800 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
801 if (err) {
802 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
803 vol_id, lnum, pnum);
804 goto write_error;
805 }
806
807 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
808 if (err) {
809 ubi_warn("failed to write %d bytes of data to PEB %d",
810 len, pnum);
811 goto write_error;
812 }
813
814 ubi_assert(vol->eba_tbl[lnum] < 0);
815 vol->eba_tbl[lnum] = pnum;
816
817 leb_write_unlock(ubi, vol_id, lnum);
818 ubi_free_vid_hdr(ubi, vid_hdr);
819 return 0;
820
821write_error:
822 if (err != -EIO || !ubi->bad_allowed) {
823 /*
824 * This flash device does not admit of bad eraseblocks or
825 * something nasty and unexpected happened. Switch to read-only
826 * mode just in case.
827 */
828 ubi_ro_mode(ubi);
829 leb_write_unlock(ubi, vol_id, lnum);
830 ubi_free_vid_hdr(ubi, vid_hdr);
831 return err;
832 }
833
834 err = ubi_wl_put_peb(ubi, pnum, 1);
835 if (err || ++tries > UBI_IO_RETRIES) {
836 ubi_ro_mode(ubi);
837 leb_write_unlock(ubi, vol_id, lnum);
838 ubi_free_vid_hdr(ubi, vid_hdr);
839 return err;
840 }
841
3261ebd7 842 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
801c135c
AB
843 ubi_msg("try another PEB");
844 goto retry;
845}
846
847/*
848 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
849 * @ubi: UBI device description object
c63a491d 850 * @vol: volume description object
801c135c
AB
851 * @lnum: logical eraseblock number
852 * @buf: data to write
853 * @len: how many bytes to write
854 * @dtype: data type
855 *
856 * This function changes the contents of a logical eraseblock atomically. @buf
857 * has to contain new logical eraseblock data, and @len - the length of the
858 * data, which has to be aligned. This function guarantees that in case of an
859 * unclean reboot the old contents is preserved. Returns zero in case of
860 * success and a negative error code in case of failure.
e8823bd6
AB
861 *
862 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
863 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
801c135c 864 */
89b96b69
AB
865int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
866 int lnum, const void *buf, int len, int dtype)
801c135c 867{
89b96b69 868 int err, pnum, tries = 0, vol_id = vol->vol_id;
801c135c
AB
869 struct ubi_vid_hdr *vid_hdr;
870 uint32_t crc;
871
e73f4459 872 ubi_assert(ubi->ref_count > 0);
d05c77a8
AB
873 ubi_assert(vol->ref_count > 0);
874
801c135c
AB
875 if (ubi->ro_mode)
876 return -EROFS;
877
33818bbb 878 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
801c135c
AB
879 if (!vid_hdr)
880 return -ENOMEM;
881
e8823bd6 882 mutex_lock(&ubi->alc_mutex);
801c135c 883 err = leb_write_lock(ubi, vol_id, lnum);
e8823bd6
AB
884 if (err)
885 goto out_mutex;
801c135c 886
3261ebd7
CH
887 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
888 vid_hdr->vol_id = cpu_to_be32(vol_id);
889 vid_hdr->lnum = cpu_to_be32(lnum);
801c135c 890 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
3261ebd7 891 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
801c135c
AB
892
893 crc = crc32(UBI_CRC32_INIT, buf, len);
84a92580 894 vid_hdr->vol_type = UBI_VID_DYNAMIC;
3261ebd7 895 vid_hdr->data_size = cpu_to_be32(len);
801c135c 896 vid_hdr->copy_flag = 1;
3261ebd7 897 vid_hdr->data_crc = cpu_to_be32(crc);
801c135c
AB
898
899retry:
900 pnum = ubi_wl_get_peb(ubi, dtype);
901 if (pnum < 0) {
e8823bd6
AB
902 err = pnum;
903 goto out_leb_unlock;
801c135c
AB
904 }
905
906 dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
907 vol_id, lnum, vol->eba_tbl[lnum], pnum);
908
909 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
910 if (err) {
911 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
912 vol_id, lnum, pnum);
913 goto write_error;
914 }
915
916 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
917 if (err) {
918 ubi_warn("failed to write %d bytes of data to PEB %d",
919 len, pnum);
920 goto write_error;
921 }
922
a443db48
AB
923 if (vol->eba_tbl[lnum] >= 0) {
924 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
e8823bd6
AB
925 if (err)
926 goto out_leb_unlock;
801c135c
AB
927 }
928
929 vol->eba_tbl[lnum] = pnum;
e8823bd6
AB
930
931out_leb_unlock:
801c135c 932 leb_write_unlock(ubi, vol_id, lnum);
e8823bd6
AB
933out_mutex:
934 mutex_unlock(&ubi->alc_mutex);
801c135c 935 ubi_free_vid_hdr(ubi, vid_hdr);
e8823bd6 936 return err;
801c135c
AB
937
938write_error:
939 if (err != -EIO || !ubi->bad_allowed) {
940 /*
941 * This flash device does not admit of bad eraseblocks or
942 * something nasty and unexpected happened. Switch to read-only
943 * mode just in case.
944 */
945 ubi_ro_mode(ubi);
e8823bd6 946 goto out_leb_unlock;
801c135c
AB
947 }
948
949 err = ubi_wl_put_peb(ubi, pnum, 1);
950 if (err || ++tries > UBI_IO_RETRIES) {
951 ubi_ro_mode(ubi);
e8823bd6 952 goto out_leb_unlock;
801c135c
AB
953 }
954
3261ebd7 955 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
801c135c
AB
956 ubi_msg("try another PEB");
957 goto retry;
958}
959
801c135c
AB
960/**
961 * ubi_eba_copy_leb - copy logical eraseblock.
962 * @ubi: UBI device description object
963 * @from: physical eraseblock number from where to copy
964 * @to: physical eraseblock number where to copy
965 * @vid_hdr: VID header of the @from physical eraseblock
966 *
967 * This function copies logical eraseblock from physical eraseblock @from to
968 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
43f9b25a
AB
969 * function. Returns:
970 * o %0 in case of success;
971 * o %1 if the operation was canceled and should be tried later (e.g.,
972 * because a bit-flip was detected at the target PEB);
973 * o %2 if the volume is being deleted and this LEB should not be moved.
801c135c
AB
974 */
975int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
976 struct ubi_vid_hdr *vid_hdr)
977{
43f9b25a 978 int err, vol_id, lnum, data_size, aldata_size, idx;
801c135c
AB
979 struct ubi_volume *vol;
980 uint32_t crc;
801c135c 981
3261ebd7
CH
982 vol_id = be32_to_cpu(vid_hdr->vol_id);
983 lnum = be32_to_cpu(vid_hdr->lnum);
801c135c
AB
984
985 dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
986
987 if (vid_hdr->vol_type == UBI_VID_STATIC) {
3261ebd7 988 data_size = be32_to_cpu(vid_hdr->data_size);
801c135c
AB
989 aldata_size = ALIGN(data_size, ubi->min_io_size);
990 } else
991 data_size = aldata_size =
3261ebd7 992 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
801c135c 993
801c135c 994 idx = vol_id2idx(ubi, vol_id);
43f9b25a 995 spin_lock(&ubi->volumes_lock);
801c135c 996 /*
43f9b25a
AB
997 * Note, we may race with volume deletion, which means that the volume
998 * this logical eraseblock belongs to might be being deleted. Since the
999 * volume deletion unmaps all the volume's logical eraseblocks, it will
1000 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
801c135c 1001 */
801c135c
AB
1002 vol = ubi->volumes[idx];
1003 if (!vol) {
43f9b25a
AB
1004 /* No need to do further work, cancel */
1005 dbg_eba("volume %d is being removed, cancel", vol_id);
801c135c 1006 spin_unlock(&ubi->volumes_lock);
43f9b25a 1007 return 2;
801c135c 1008 }
43f9b25a 1009 spin_unlock(&ubi->volumes_lock);
801c135c 1010
43f9b25a
AB
1011 /*
1012 * We do not want anybody to write to this logical eraseblock while we
1013 * are moving it, so lock it.
1014 *
1015 * Note, we are using non-waiting locking here, because we cannot sleep
1016 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1017 * unmapping the LEB which is mapped to the PEB we are going to move
1018 * (@from). This task locks the LEB and goes sleep in the
1019 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1020 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1021 * LEB is already locked, we just do not move it and return %1.
1022 */
1023 err = leb_write_trylock(ubi, vol_id, lnum);
1024 if (err) {
1025 dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
1026 return err;
801c135c 1027 }
801c135c 1028
43f9b25a
AB
1029 /*
1030 * The LEB might have been put meanwhile, and the task which put it is
1031 * probably waiting on @ubi->move_mutex. No need to continue the work,
1032 * cancel it.
1033 */
1034 if (vol->eba_tbl[lnum] != from) {
1035 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
1036 "PEB %d, cancel", vol_id, lnum, from,
1037 vol->eba_tbl[lnum]);
1038 err = 1;
1039 goto out_unlock_leb;
1040 }
801c135c 1041
43f9b25a
AB
1042 /*
1043 * OK, now the LEB is locked and we can safely start moving iy. Since
1044 * this function utilizes thie @ubi->peb1_buf buffer which is shared
1045 * with some other functions, so lock the buffer by taking the
1046 * @ubi->buf_mutex.
1047 */
1048 mutex_lock(&ubi->buf_mutex);
801c135c 1049 dbg_eba("read %d bytes of data", aldata_size);
e88d6e10 1050 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
801c135c
AB
1051 if (err && err != UBI_IO_BITFLIPS) {
1052 ubi_warn("error %d while reading data from PEB %d",
1053 err, from);
43f9b25a 1054 goto out_unlock_buf;
801c135c
AB
1055 }
1056
1057 /*
1058 * Now we have got to calculate how much data we have to to copy. In
1059 * case of a static volume it is fairly easy - the VID header contains
1060 * the data size. In case of a dynamic volume it is more difficult - we
1061 * have to read the contents, cut 0xFF bytes from the end and copy only
1062 * the first part. We must do this to avoid writing 0xFF bytes as it
1063 * may have some side-effects. And not only this. It is important not
1064 * to include those 0xFFs to CRC because later the they may be filled
1065 * by data.
1066 */
1067 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1068 aldata_size = data_size =
e88d6e10 1069 ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
801c135c
AB
1070
1071 cond_resched();
e88d6e10 1072 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
801c135c
AB
1073 cond_resched();
1074
1075 /*
1076 * It may turn out to me that the whole @from physical eraseblock
1077 * contains only 0xFF bytes. Then we have to only write the VID header
1078 * and do not write any data. This also means we should not set
1079 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1080 */
1081 if (data_size > 0) {
1082 vid_hdr->copy_flag = 1;
3261ebd7
CH
1083 vid_hdr->data_size = cpu_to_be32(data_size);
1084 vid_hdr->data_crc = cpu_to_be32(crc);
801c135c 1085 }
3261ebd7 1086 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
801c135c
AB
1087
1088 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1089 if (err)
43f9b25a 1090 goto out_unlock_buf;
801c135c
AB
1091
1092 cond_resched();
1093
1094 /* Read the VID header back and check if it was written correctly */
1095 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1096 if (err) {
1097 if (err != UBI_IO_BITFLIPS)
1098 ubi_warn("cannot read VID header back from PEB %d", to);
43f9b25a
AB
1099 else
1100 err = 1;
1101 goto out_unlock_buf;
801c135c
AB
1102 }
1103
1104 if (data_size > 0) {
e88d6e10 1105 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
801c135c 1106 if (err)
43f9b25a 1107 goto out_unlock_buf;
801c135c 1108
e88d6e10
AB
1109 cond_resched();
1110
801c135c
AB
1111 /*
1112 * We've written the data and are going to read it back to make
1113 * sure it was written correctly.
1114 */
801c135c 1115
e88d6e10 1116 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
801c135c
AB
1117 if (err) {
1118 if (err != UBI_IO_BITFLIPS)
1119 ubi_warn("cannot read data back from PEB %d",
1120 to);
43f9b25a
AB
1121 else
1122 err = 1;
1123 goto out_unlock_buf;
801c135c
AB
1124 }
1125
1126 cond_resched();
1127
e88d6e10 1128 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
801c135c
AB
1129 ubi_warn("read data back from PEB %d - it is different",
1130 to);
43f9b25a 1131 goto out_unlock_buf;
801c135c
AB
1132 }
1133 }
1134
1135 ubi_assert(vol->eba_tbl[lnum] == from);
1136 vol->eba_tbl[lnum] = to;
1137
43f9b25a 1138out_unlock_buf:
e88d6e10 1139 mutex_unlock(&ubi->buf_mutex);
43f9b25a 1140out_unlock_leb:
801c135c 1141 leb_write_unlock(ubi, vol_id, lnum);
801c135c
AB
1142 return err;
1143}
1144
1145/**
1146 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
1147 * @ubi: UBI device description object
1148 * @si: scanning information
1149 *
1150 * This function returns zero in case of success and a negative error code in
1151 * case of failure.
1152 */
1153int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1154{
1155 int i, j, err, num_volumes;
1156 struct ubi_scan_volume *sv;
1157 struct ubi_volume *vol;
1158 struct ubi_scan_leb *seb;
1159 struct rb_node *rb;
1160
1161 dbg_eba("initialize EBA unit");
1162
1163 spin_lock_init(&ubi->ltree_lock);
e8823bd6 1164 mutex_init(&ubi->alc_mutex);
801c135c
AB
1165 ubi->ltree = RB_ROOT;
1166
801c135c
AB
1167 ubi->global_sqnum = si->max_sqnum + 1;
1168 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1169
1170 for (i = 0; i < num_volumes; i++) {
1171 vol = ubi->volumes[i];
1172 if (!vol)
1173 continue;
1174
1175 cond_resched();
1176
1177 vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1178 GFP_KERNEL);
1179 if (!vol->eba_tbl) {
1180 err = -ENOMEM;
1181 goto out_free;
1182 }
1183
1184 for (j = 0; j < vol->reserved_pebs; j++)
1185 vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1186
1187 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1188 if (!sv)
1189 continue;
1190
1191 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
1192 if (seb->lnum >= vol->reserved_pebs)
1193 /*
1194 * This may happen in case of an unclean reboot
1195 * during re-size.
1196 */
1197 ubi_scan_move_to_list(sv, seb, &si->erase);
1198 vol->eba_tbl[seb->lnum] = seb->pnum;
1199 }
1200 }
1201
94780d4d
AB
1202 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1203 ubi_err("no enough physical eraseblocks (%d, need %d)",
1204 ubi->avail_pebs, EBA_RESERVED_PEBS);
1205 err = -ENOSPC;
1206 goto out_free;
1207 }
1208 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1209 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1210
801c135c
AB
1211 if (ubi->bad_allowed) {
1212 ubi_calculate_reserved(ubi);
1213
1214 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1215 /* No enough free physical eraseblocks */
1216 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1217 ubi_warn("cannot reserve enough PEBs for bad PEB "
1218 "handling, reserved %d, need %d",
1219 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1220 } else
1221 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1222
1223 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1224 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1225 }
1226
1227 dbg_eba("EBA unit is initialized");
1228 return 0;
1229
1230out_free:
1231 for (i = 0; i < num_volumes; i++) {
1232 if (!ubi->volumes[i])
1233 continue;
1234 kfree(ubi->volumes[i]->eba_tbl);
1235 }
801c135c
AB
1236 return err;
1237}
1238
1239/**
1240 * ubi_eba_close - close EBA unit.
1241 * @ubi: UBI device description object
1242 */
1243void ubi_eba_close(const struct ubi_device *ubi)
1244{
1245 int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1246
1247 dbg_eba("close EBA unit");
1248
1249 for (i = 0; i < num_volumes; i++) {
1250 if (!ubi->volumes[i])
1251 continue;
1252 kfree(ubi->volumes[i]->eba_tbl);
1253 }
801c135c 1254}
This page took 0.154971 seconds and 5 git commands to generate.