1/*
2 * Copyright (c) International Business Machines Corp., 2006
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * Author: Artem Bityutskiy (Битюцкий Артём)
19 */
20
21/*
22 * The UBI Eraseblock Association (EBA) unit.
23 *
24 * This unit is responsible for I/O to/from logical eraseblocks.
25 *
26 * Although in this implementation the EBA table is fully kept and managed in
27 * RAM, which implies poor scalability, it might be (partially) maintained on
28 * flash in future implementations.
29 *
30 * The EBA unit implements per-logical eraseblock locking. Before accessing a
31 * logical eraseblock it is locked for reading or writing. The per-logical
32 * eraseblock locking is implemented by means of the lock tree. The lock tree
33 * is an RB-tree which refers all the currently locked logical eraseblocks. The
34 * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
35 * (@vol_id, @lnum) pairs.
36 *
37 * EBA also maintains the global sequence counter which is incremented each
38 * time a logical eraseblock is mapped to a physical eraseblock and it is
39 * stored in the volume identifier header. This means that each VID header has
40 * a unique sequence number. The sequence number is only increased and we assume
41 * 64 bits is enough to never overflow.
42 */
43
44#include <linux/slab.h>
45#include <linux/crc32.h>
46#include <linux/err.h>
47#include "ubi.h"
48
49/* Number of physical eraseblocks reserved for atomic LEB change operation */
50#define EBA_RESERVED_PEBS 1
51
52/**
53 * next_sqnum - get next sequence number.
54 * @ubi: UBI device description object
55 *
56 * This function returns the next sequence number to use, which is just the current
57 * global sequence counter value. It also increases the global sequence
58 * counter.
59 */
60static unsigned long long next_sqnum(struct ubi_device *ubi)
61{
62 unsigned long long sqnum;
63
64 spin_lock(&ubi->ltree_lock);
65 sqnum = ubi->global_sqnum++;
66 spin_unlock(&ubi->ltree_lock);
67
68 return sqnum;
69}
70
71/**
72 * ubi_get_compat - get compatibility flags of a volume.
73 * @ubi: UBI device description object
74 * @vol_id: volume ID
75 *
76 * This function returns compatibility flags for an internal volume. User
77 * volumes have no compatibility flags, so %0 is returned.
78 */
79static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
80{
81 if (vol_id == UBI_LAYOUT_VOL_ID)
82 return UBI_LAYOUT_VOLUME_COMPAT;
83 return 0;
84}
85
86/**
87 * ltree_lookup - look up the lock tree.
88 * @ubi: UBI device description object
89 * @vol_id: volume ID
90 * @lnum: logical eraseblock number
91 *
92 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
93 * object if the logical eraseblock is locked and %NULL if it is not.
94 * @ubi->ltree_lock has to be locked.
95 */
96static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
97 int lnum)
98{
99 struct rb_node *p;
100
101 p = ubi->ltree.rb_node;
102 while (p) {
103 struct ubi_ltree_entry *le;
104
105 le = rb_entry(p, struct ubi_ltree_entry, rb);
106
107 if (vol_id < le->vol_id)
108 p = p->rb_left;
109 else if (vol_id > le->vol_id)
110 p = p->rb_right;
111 else {
112 if (lnum < le->lnum)
113 p = p->rb_left;
114 else if (lnum > le->lnum)
115 p = p->rb_right;
116 else
117 return le;
118 }
119 }
120
121 return NULL;
122}
123
124/**
125 * ltree_add_entry - add new entry to the lock tree.
126 * @ubi: UBI device description object
127 * @vol_id: volume ID
128 * @lnum: logical eraseblock number
129 *
130 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
131 * lock tree. If such an entry is already there, its usage counter is increased.
132 * Returns a pointer to the lock tree entry or %-ENOMEM if memory allocation
133 * failed.
134 */
135static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
136 int vol_id, int lnum)
137{
138 struct ubi_ltree_entry *le, *le1, *le_free;
139
140 le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
141 if (!le)
142 return ERR_PTR(-ENOMEM);
143
144 le->vol_id = vol_id;
145 le->lnum = lnum;
146
147 spin_lock(&ubi->ltree_lock);
148 le1 = ltree_lookup(ubi, vol_id, lnum);
149
150 if (le1) {
151 /*
152 * This logical eraseblock is already locked. The newly
153 * allocated lock entry is not needed.
154 */
155 le_free = le;
156 le = le1;
157 } else {
158 struct rb_node **p, *parent = NULL;
159
160 /*
161 * No lock entry, add the newly allocated one to the
162 * @ubi->ltree RB-tree.
163 */
164 le_free = NULL;
165
166 p = &ubi->ltree.rb_node;
167 while (*p) {
168 parent = *p;
169 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
170
171 if (vol_id < le1->vol_id)
172 p = &(*p)->rb_left;
173 else if (vol_id > le1->vol_id)
174 p = &(*p)->rb_right;
175 else {
176 ubi_assert(lnum != le1->lnum);
177 if (lnum < le1->lnum)
178 p = &(*p)->rb_left;
179 else
180 p = &(*p)->rb_right;
181 }
182 }
183
184 rb_link_node(&le->rb, parent, p);
185 rb_insert_color(&le->rb, &ubi->ltree);
186 }
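        /* Whether found or newly inserted, the entry gains one more user (us) */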
187 le->users += 1;
188 spin_unlock(&ubi->ltree_lock);
189
190 if (le_free)
191 kmem_cache_free(ubi_ltree_slab, le_free);
192
193 return le;
194}
195
196/**
197 * leb_read_lock - lock logical eraseblock for reading.
198 * @ubi: UBI device description object
199 * @vol_id: volume ID
200 * @lnum: logical eraseblock number
201 *
202 * This function locks a logical eraseblock for reading. Returns zero in case
203 * of success and a negative error code in case of failure.
204 */
205static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
206{
207 struct ubi_ltree_entry *le;
208
209 le = ltree_add_entry(ubi, vol_id, lnum);
210 if (IS_ERR(le))
211 return PTR_ERR(le);
212 down_read(&le->mutex);
213 return 0;
214}
215
216/**
217 * leb_read_unlock - unlock logical eraseblock.
218 * @ubi: UBI device description object
219 * @vol_id: volume ID
220 * @lnum: logical eraseblock number
221 */
222static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
223{
224 int free = 0;
225 struct ubi_ltree_entry *le;
226
227 spin_lock(&ubi->ltree_lock);
228 le = ltree_lookup(ubi, vol_id, lnum);
229 le->users -= 1;
230 ubi_assert(le->users >= 0);
231 if (le->users == 0) {
232 rb_erase(&le->rb, &ubi->ltree);
233 free = 1;
234 }
235 spin_unlock(&ubi->ltree_lock);
236
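        /* If we were the last user, the entry is already out of the tree, so it may be freed once the semaphore is released */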
237 up_read(&le->mutex);
238 if (free)
239 kmem_cache_free(ubi_ltree_slab, le);
240}
241
242/**
243 * leb_write_lock - lock logical eraseblock for writing.
244 * @ubi: UBI device description object
245 * @vol_id: volume ID
246 * @lnum: logical eraseblock number
247 *
248 * This function locks a logical eraseblock for writing. Returns zero in case
249 * of success and a negative error code in case of failure.
250 */
251static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
252{
253 struct ubi_ltree_entry *le;
254
255 le = ltree_add_entry(ubi, vol_id, lnum);
256 if (IS_ERR(le))
257 return PTR_ERR(le);
258 down_write(&le->mutex);
259 return 0;
260}
261
262/**
263 * leb_write_unlock - unlock logical eraseblock.
264 * @ubi: UBI device description object
265 * @vol_id: volume ID
266 * @lnum: logical eraseblock number
267 */
268static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
269{
270 int free;
271 struct ubi_ltree_entry *le;
272
273 spin_lock(&ubi->ltree_lock);
274 le = ltree_lookup(ubi, vol_id, lnum);
275 le->users -= 1;
276 ubi_assert(le->users >= 0);
277 if (le->users == 0) {
278 rb_erase(&le->rb, &ubi->ltree);
279 free = 1;
280 } else
281 free = 0;
282 spin_unlock(&ubi->ltree_lock);
283
284 up_write(&le->mutex);
285 if (free)
286 kmem_cache_free(ubi_ltree_slab, le);
287}
288
289/**
290 * ubi_eba_unmap_leb - un-map logical eraseblock.
291 * @ubi: UBI device description object
292 * @vol_id: volume ID
293 * @lnum: logical eraseblock number
294 *
295 * This function un-maps logical eraseblock @lnum and schedules corresponding
296 * physical eraseblock for erasure. Returns zero in case of success and a
297 * negative error code in case of failure.
298 */
299int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
300{
301 int idx = vol_id2idx(ubi, vol_id), err, pnum;
302 struct ubi_volume *vol = ubi->volumes[idx];
303
304 if (ubi->ro_mode)
305 return -EROFS;
306
307 err = leb_write_lock(ubi, vol_id, lnum);
308 if (err)
309 return err;
310
311 pnum = vol->eba_tbl[lnum];
312 if (pnum < 0)
313 /* This logical eraseblock is already unmapped */
314 goto out_unlock;
315
316 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
317
318 vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
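        /* The LEB is now unmapped; hand the old PEB over to wear-levelling for erasure */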
319 err = ubi_wl_put_peb(ubi, pnum, 0);
320
321out_unlock:
322 leb_write_unlock(ubi, vol_id, lnum);
323 return err;
324}
325
326/**
327 * ubi_eba_read_leb - read data.
328 * @ubi: UBI device description object
329 * @vol_id: volume ID
330 * @lnum: logical eraseblock number
331 * @buf: buffer to store the read data
332 * @offset: offset from where to read
333 * @len: how many bytes to read
334 * @check: data CRC check flag
335 *
336 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
337 * bytes. The @check flag only makes sense for static volumes and forces
338 * eraseblock data CRC checking.
339 *
340 * In case of success this function returns zero. In case of a static volume,
341 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
342 * returned for any volume type if an ECC error was detected by the MTD device
343 * driver. Other negative error codes may be returned in case of other errors.
344 */
345int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
346 int offset, int len, int check)
347{
348 int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
349 struct ubi_vid_hdr *vid_hdr;
350 struct ubi_volume *vol = ubi->volumes[idx];
351 uint32_t uninitialized_var(crc);
352
353 err = leb_read_lock(ubi, vol_id, lnum);
354 if (err)
355 return err;
356
357 pnum = vol->eba_tbl[lnum];
358 if (pnum < 0) {
359 /*
360 * The logical eraseblock is not mapped, fill the whole buffer
361 * with 0xFF bytes. The exception is static volumes for which
362 * it is an error to read unmapped logical eraseblocks.
363 */
364 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
365 len, offset, vol_id, lnum);
366 leb_read_unlock(ubi, vol_id, lnum);
367 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
368 memset(buf, 0xFF, len);
369 return 0;
370 }
371
372 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
373 len, offset, vol_id, lnum, pnum);
374
375 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
376 check = 0;
377
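        /* We come back here with @check set if an ECC error is detected below and the data CRC has not been verified yet */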
378retry:
379 if (check) {
380 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
381 if (!vid_hdr) {
382 err = -ENOMEM;
383 goto out_unlock;
384 }
385
386 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
387 if (err && err != UBI_IO_BITFLIPS) {
388 if (err > 0) {
389 /*
390 * The header is either absent or corrupted.
391 * The former case means there is a bug -
392 * switch to read-only mode just in case.
393 * The latter case means a real corruption - we
394 * may try to recover data. FIXME: but this is
395 * not implemented.
396 */
397 if (err == UBI_IO_BAD_VID_HDR) {
398 ubi_warn("bad VID header at PEB %d, LEB "
399 "%d:%d", pnum, vol_id, lnum);
400 err = -EBADMSG;
401 } else
402 ubi_ro_mode(ubi);
403 }
404 goto out_free;
405 } else if (err == UBI_IO_BITFLIPS)
406 scrub = 1;
407
408 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
409 ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
410
411 crc = be32_to_cpu(vid_hdr->data_crc);
412 ubi_free_vid_hdr(ubi, vid_hdr);
413 }
414
415 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
416 if (err) {
417 if (err == UBI_IO_BITFLIPS) {
418 scrub = 1;
419 err = 0;
420 } else if (err == -EBADMSG) {
421 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
422 goto out_unlock;
423 scrub = 1;
424 if (!check) {
425 ubi_msg("force data checking");
426 check = 1;
427 goto retry;
428 }
429 } else
430 goto out_unlock;
431 }
432
433 if (check) {
434 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
435 if (crc1 != crc) {
436 ubi_warn("CRC error: calculated %#08x, must be %#08x",
437 crc1, crc);
438 err = -EBADMSG;
439 goto out_unlock;
440 }
441 }
442
443 if (scrub)
444 err = ubi_wl_scrub_peb(ubi, pnum);
445
446 leb_read_unlock(ubi, vol_id, lnum);
447 return err;
448
449out_free:
450 ubi_free_vid_hdr(ubi, vid_hdr);
451out_unlock:
452 leb_read_unlock(ubi, vol_id, lnum);
453 return err;
454}
455
456/**
457 * recover_peb - recover from write failure.
458 * @ubi: UBI device description object
459 * @pnum: the physical eraseblock to recover
460 * @vol_id: volume ID
461 * @lnum: logical eraseblock number
462 * @buf: data which was not written because of the write failure
463 * @offset: offset of the failed write
464 * @len: how many bytes should have been written
465 *
466 * This function is called in case of a write failure and moves all good data
467 * from the potentially bad physical eraseblock to a good physical eraseblock.
468 * This function also writes the data which was not written due to the failure.
469 * Returns new physical eraseblock number in case of success, and a negative
470 * error code in case of failure.
471 */
472static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
473 const void *buf, int offset, int len)
474{
475 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
476 struct ubi_volume *vol = ubi->volumes[idx];
477 struct ubi_vid_hdr *vid_hdr;
478
479 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
480 if (!vid_hdr) {
481 return -ENOMEM;
482 }
483
484 mutex_lock(&ubi->buf_mutex);
485
486retry:
487 new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
488 if (new_pnum < 0) {
489 mutex_unlock(&ubi->buf_mutex);
490 ubi_free_vid_hdr(ubi, vid_hdr);
491 return new_pnum;
492 }
493
494 ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
495
496 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
497 if (err && err != UBI_IO_BITFLIPS) {
498 if (err > 0)
499 err = -EIO;
500 goto out_put;
501 }
502
503 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
504 err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
505 if (err)
506 goto write_error;
507
508 data_size = offset + len;
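        /* Pre-fill the region of the failed write with 0xFF; @buf is copied over it below */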
509 memset(ubi->peb_buf1 + offset, 0xFF, len);
510
511 /* Read everything before the area where the write failure happened */
512 if (offset > 0) {
513 err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
514 if (err && err != UBI_IO_BITFLIPS)
515 goto out_put;
516 }
517
518 memcpy(ubi->peb_buf1 + offset, buf, len);
519
520 err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
521 if (err)
522 goto write_error;
523
524 mutex_unlock(&ubi->buf_mutex);
525 ubi_free_vid_hdr(ubi, vid_hdr);
526
527 vol->eba_tbl[lnum] = new_pnum;
528 ubi_wl_put_peb(ubi, pnum, 1);
529
530 ubi_msg("data was successfully recovered");
531 return 0;
532
533out_put:
534 mutex_unlock(&ubi->buf_mutex);
535 ubi_wl_put_peb(ubi, new_pnum, 1);
536 ubi_free_vid_hdr(ubi, vid_hdr);
537 return err;
538
539write_error:
540 /*
541 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
542 * get another one.
543 */
544 ubi_warn("failed to write to PEB %d", new_pnum);
545 ubi_wl_put_peb(ubi, new_pnum, 1);
546 if (++tries > UBI_IO_RETRIES) {
547 mutex_unlock(&ubi->buf_mutex);
548 ubi_free_vid_hdr(ubi, vid_hdr);
549 return err;
550 }
551 ubi_msg("try again");
552 goto retry;
553}
554
555/**
556 * ubi_eba_write_leb - write data to dynamic volume.
557 * @ubi: UBI device description object
558 * @vol_id: volume ID
559 * @lnum: logical eraseblock number
560 * @buf: the data to write
561 * @offset: offset within the logical eraseblock where to write
562 * @len: how many bytes to write
563 * @dtype: data type
564 *
565 * This function writes data to logical eraseblock @lnum of a dynamic volume
566 * @vol_id. Returns zero in case of success and a negative error code in case
567 * of failure. In case of error, it is possible that something was still
568 * written to the flash media, but it may be some garbage.
569 */
570int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
571 const void *buf, int offset, int len, int dtype)
572{
573 int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
574 struct ubi_volume *vol = ubi->volumes[idx];
575 struct ubi_vid_hdr *vid_hdr;
576
577 if (ubi->ro_mode)
578 return -EROFS;
579
580 err = leb_write_lock(ubi, vol_id, lnum);
581 if (err)
582 return err;
583
584 pnum = vol->eba_tbl[lnum];
585 if (pnum >= 0) {
586 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
587 len, offset, vol_id, lnum, pnum);
588
589 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
590 if (err) {
591 ubi_warn("failed to write data to PEB %d", pnum);
592 if (err == -EIO && ubi->bad_allowed)
593 err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
594 if (err)
595 ubi_ro_mode(ubi);
596 }
597 leb_write_unlock(ubi, vol_id, lnum);
598 return err;
599 }
600
601 /*
602 * The logical eraseblock is not mapped. We have to get a free physical
603 * eraseblock and write the volume identifier header there first.
604 */
605 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
606 if (!vid_hdr) {
607 leb_write_unlock(ubi, vol_id, lnum);
608 return -ENOMEM;
609 }
610
611 vid_hdr->vol_type = UBI_VID_DYNAMIC;
612 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
613 vid_hdr->vol_id = cpu_to_be32(vol_id);
614 vid_hdr->lnum = cpu_to_be32(lnum);
615 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
616 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
617
618retry:
619 pnum = ubi_wl_get_peb(ubi, dtype);
620 if (pnum < 0) {
621 ubi_free_vid_hdr(ubi, vid_hdr);
622 leb_write_unlock(ubi, vol_id, lnum);
623 return pnum;
624 }
625
626 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
627 len, offset, vol_id, lnum, pnum);
628
629 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
630 if (err) {
631 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
632 vol_id, lnum, pnum);
633 goto write_error;
634 }
635
636 if (len) {
637 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
638 if (err) {
639 ubi_warn("failed to write %d bytes at offset %d of "
640 "LEB %d:%d, PEB %d", len, offset, vol_id,
641 lnum, pnum);
642 goto write_error;
643 }
644 }
645
646 vol->eba_tbl[lnum] = pnum;
647
648 leb_write_unlock(ubi, vol_id, lnum);
649 ubi_free_vid_hdr(ubi, vid_hdr);
650 return 0;
651
652write_error:
653 if (err != -EIO || !ubi->bad_allowed) {
654 ubi_ro_mode(ubi);
655 leb_write_unlock(ubi, vol_id, lnum);
656 ubi_free_vid_hdr(ubi, vid_hdr);
657 return err;
658 }
659
660 /*
661 * Fortunately, this is the first write operation to this physical
662 * eraseblock, so just put it and request a new one. We assume that if
663 * this physical eraseblock went bad, the erase code will handle that.
664 */
665 err = ubi_wl_put_peb(ubi, pnum, 1);
666 if (err || ++tries > UBI_IO_RETRIES) {
667 ubi_ro_mode(ubi);
668 leb_write_unlock(ubi, vol_id, lnum);
669 ubi_free_vid_hdr(ubi, vid_hdr);
670 return err;
671 }
672
673 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
674 ubi_msg("try another PEB");
675 goto retry;
676}
677
678/**
679 * ubi_eba_write_leb_st - write data to static volume.
680 * @ubi: UBI device description object
681 * @vol_id: volume ID
682 * @lnum: logical eraseblock number
683 * @buf: data to write
684 * @len: how many bytes to write
685 * @dtype: data type
686 * @used_ebs: how many logical eraseblocks will this volume contain
687 *
688 * This function writes data to logical eraseblock @lnum of static volume
689 * @vol_id. The @used_ebs argument should contain total number of logical
690 * eraseblock in this static volume.
691 *
692 * When writing to the last logical eraseblock, the @len argument doesn't have
693 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
694 * to the real data size, although the @buf buffer still has to contain the
695 * alignment padding. In all other cases, @len has to be aligned.
696 *
697 * It is prohibited to write more than once to logical eraseblocks of static
698 * volumes. This function returns zero in case of success and a negative error
699 * code in case of failure.
700 */
701int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
702 const void *buf, int len, int dtype, int used_ebs)
703{
704 int err, pnum, tries = 0, data_size = len;
705 int idx = vol_id2idx(ubi, vol_id);
706 struct ubi_volume *vol = ubi->volumes[idx];
707 struct ubi_vid_hdr *vid_hdr;
708 uint32_t crc;
709
710 if (ubi->ro_mode)
711 return -EROFS;
712
713 if (lnum == used_ebs - 1)
714 /* If this is the last LEB @len may be unaligned */
715 len = ALIGN(data_size, ubi->min_io_size);
716 else
717 ubi_assert(len % ubi->min_io_size == 0);
718
719 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
720 if (!vid_hdr)
721 return -ENOMEM;
722
723 err = leb_write_lock(ubi, vol_id, lnum);
724 if (err) {
725 ubi_free_vid_hdr(ubi, vid_hdr);
726 return err;
727 }
728
729 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
730 vid_hdr->vol_id = cpu_to_be32(vol_id);
731 vid_hdr->lnum = cpu_to_be32(lnum);
732 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
733 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
734
735 crc = crc32(UBI_CRC32_INIT, buf, data_size);
736 vid_hdr->vol_type = UBI_VID_STATIC;
737 vid_hdr->data_size = cpu_to_be32(data_size);
738 vid_hdr->used_ebs = cpu_to_be32(used_ebs);
739 vid_hdr->data_crc = cpu_to_be32(crc);
740
741retry:
742 pnum = ubi_wl_get_peb(ubi, dtype);
743 if (pnum < 0) {
744 ubi_free_vid_hdr(ubi, vid_hdr);
745 leb_write_unlock(ubi, vol_id, lnum);
746 return pnum;
747 }
748
749 dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
750 len, vol_id, lnum, pnum, used_ebs);
751
752 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
753 if (err) {
754 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
755 vol_id, lnum, pnum);
756 goto write_error;
757 }
758
759 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
760 if (err) {
761 ubi_warn("failed to write %d bytes of data to PEB %d",
762 len, pnum);
763 goto write_error;
764 }
765
766 ubi_assert(vol->eba_tbl[lnum] < 0);
767 vol->eba_tbl[lnum] = pnum;
768
769 leb_write_unlock(ubi, vol_id, lnum);
770 ubi_free_vid_hdr(ubi, vid_hdr);
771 return 0;
772
773write_error:
774 if (err != -EIO || !ubi->bad_allowed) {
775 /*
776 * This flash device does not admit of bad eraseblocks or
777 * something nasty and unexpected happened. Switch to read-only
778 * mode just in case.
779 */
780 ubi_ro_mode(ubi);
781 leb_write_unlock(ubi, vol_id, lnum);
782 ubi_free_vid_hdr(ubi, vid_hdr);
783 return err;
784 }
785
786 err = ubi_wl_put_peb(ubi, pnum, 1);
787 if (err || ++tries > UBI_IO_RETRIES) {
788 ubi_ro_mode(ubi);
789 leb_write_unlock(ubi, vol_id, lnum);
790 ubi_free_vid_hdr(ubi, vid_hdr);
791 return err;
792 }
793
794 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
795 ubi_msg("try another PEB");
796 goto retry;
797}
798
799/**
800 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
801 * @ubi: UBI device description object
802 * @vol_id: volume ID
803 * @lnum: logical eraseblock number
804 * @buf: data to write
805 * @len: how many bytes to write
806 * @dtype: data type
807 *
808 * This function changes the contents of a logical eraseblock atomically. @buf
809 * has to contain new logical eraseblock data, and @len - the length of the
810 * data, which has to be aligned. This function guarantees that in case of an
811 * unclean reboot the old contents are preserved. Returns zero in case of
812 * success and a negative error code in case of failure.
813 *
814 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
815 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
816 */
817int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
818 const void *buf, int len, int dtype)
819{
820 int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
821 struct ubi_volume *vol = ubi->volumes[idx];
822 struct ubi_vid_hdr *vid_hdr;
823 uint32_t crc;
824
825 if (ubi->ro_mode)
826 return -EROFS;
827
828 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
829 if (!vid_hdr)
830 return -ENOMEM;
831
832 mutex_lock(&ubi->alc_mutex);
833 err = leb_write_lock(ubi, vol_id, lnum);
834 if (err)
835 goto out_mutex;
836
837 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
838 vid_hdr->vol_id = cpu_to_be32(vol_id);
839 vid_hdr->lnum = cpu_to_be32(lnum);
840 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
841 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
842
843 crc = crc32(UBI_CRC32_INIT, buf, len);
844 vid_hdr->vol_type = UBI_VID_DYNAMIC;
845 vid_hdr->data_size = cpu_to_be32(len);
846 vid_hdr->copy_flag = 1;
847 vid_hdr->data_crc = cpu_to_be32(crc);
848
849retry:
850 pnum = ubi_wl_get_peb(ubi, dtype);
851 if (pnum < 0) {
852 err = pnum;
853 goto out_leb_unlock;
854 }
855
856 dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
857 vol_id, lnum, vol->eba_tbl[lnum], pnum);
858
859 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
860 if (err) {
861 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
862 vol_id, lnum, pnum);
863 goto write_error;
864 }
865
866 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
867 if (err) {
868 ubi_warn("failed to write %d bytes of data to PEB %d",
869 len, pnum);
870 goto write_error;
871 }
872
873 if (vol->eba_tbl[lnum] >= 0) {
874 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
875 if (err)
876 goto out_leb_unlock;
877 }
878
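        /* The new data is fully on flash and the old PEB (if any) is scheduled for erasure; re-map the LEB */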
879 vol->eba_tbl[lnum] = pnum;
880
881out_leb_unlock:
882 leb_write_unlock(ubi, vol_id, lnum);
883out_mutex:
884 mutex_unlock(&ubi->alc_mutex);
885 ubi_free_vid_hdr(ubi, vid_hdr);
886 return err;
887
888write_error:
889 if (err != -EIO || !ubi->bad_allowed) {
890 /*
891 * This flash device does not admit of bad eraseblocks or
892 * something nasty and unexpected happened. Switch to read-only
893 * mode just in case.
894 */
895 ubi_ro_mode(ubi);
896 goto out_leb_unlock;
897 }
898
899 err = ubi_wl_put_peb(ubi, pnum, 1);
900 if (err || ++tries > UBI_IO_RETRIES) {
901 ubi_ro_mode(ubi);
902 goto out_leb_unlock;
903 }
904
905 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
906 ubi_msg("try another PEB");
907 goto retry;
908}
909
910/**
911 * ubi_eba_copy_leb - copy logical eraseblock.
912 * @ubi: UBI device description object
913 * @from: physical eraseblock number from where to copy
914 * @to: physical eraseblock number where to copy
915 * @vid_hdr: VID header of the @from physical eraseblock
916 *
917 * This function copies logical eraseblock from physical eraseblock @from to
918 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
919 * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
920 * was canceled because bit-flips were detected at the target PEB, and a
921 * negative error code in case of failure.
922 */
923int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
924 struct ubi_vid_hdr *vid_hdr)
925{
926 int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
927 struct ubi_volume *vol;
928 uint32_t crc;
929
930 vol_id = be32_to_cpu(vid_hdr->vol_id);
931 lnum = be32_to_cpu(vid_hdr->lnum);
932
933 dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
934
935 if (vid_hdr->vol_type == UBI_VID_STATIC) {
936 data_size = be32_to_cpu(vid_hdr->data_size);
937 aldata_size = ALIGN(data_size, ubi->min_io_size);
938 } else
939 data_size = aldata_size =
940 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
941
942 /*
943 * We do not want anybody to write to this logical eraseblock while we
944 * are moving it, so we lock it.
945 */
946 err = leb_write_lock(ubi, vol_id, lnum);
947 if (err)
948 return err;
949
950 mutex_lock(&ubi->buf_mutex);
951
952 /*
953 * But the logical eraseblock might have been put by this time.
954 * Cancel if it is true.
955 */
956 idx = vol_id2idx(ubi, vol_id);
957
958 /*
959 * We may race with volume deletion/re-size, so we have to hold
960 * @ubi->volumes_lock.
961 */
962 spin_lock(&ubi->volumes_lock);
963 vol = ubi->volumes[idx];
964 if (!vol) {
965 dbg_eba("volume %d was removed meanwhile", vol_id);
966 spin_unlock(&ubi->volumes_lock);
967 goto out_unlock;
968 }
969
970 pnum = vol->eba_tbl[lnum];
971 if (pnum != from) {
972 dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
973 "PEB %d, cancel", vol_id, lnum, from, pnum);
974 spin_unlock(&ubi->volumes_lock);
975 goto out_unlock;
976 }
977 spin_unlock(&ubi->volumes_lock);
978
979 /* OK, now the LEB is locked and we can safely start moving it */
980
981 dbg_eba("read %d bytes of data", aldata_size);
982 err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
983 if (err && err != UBI_IO_BITFLIPS) {
984 ubi_warn("error %d while reading data from PEB %d",
985 err, from);
986 goto out_unlock;
987 }
988
989 /*
990 * Now we have to calculate how much data we have to copy. In
991 * case of a static volume it is fairly easy - the VID header contains
992 * the data size. In case of a dynamic volume it is more difficult - we
993 * have to read the contents, cut 0xFF bytes from the end and copy only
994 * the first part. We must do this to avoid writing 0xFF bytes as it
995 * may have some side-effects. And not only this. It is important not
996 * to include those 0xFFs in the CRC because later they may be filled
997 * by data.
998 */
999 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1000 aldata_size = data_size =
1001 ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
1002
1003 cond_resched();
1004 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
1005 cond_resched();
1006
1007 /*
1008 * It may turn out to be that the whole @from physical eraseblock
1009 * contains only 0xFF bytes. Then we have to only write the VID header
1010 * and do not write any data. This also means we should not set
1011 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1012 */
1013 if (data_size > 0) {
1014 vid_hdr->copy_flag = 1;
1015 vid_hdr->data_size = cpu_to_be32(data_size);
1016 vid_hdr->data_crc = cpu_to_be32(crc);
1017 }
1018 vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
1019
1020 err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
1021 if (err)
1022 goto out_unlock;
1023
1024 cond_resched();
1025
1026 /* Read the VID header back and check if it was written correctly */
1027 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1028 if (err) {
1029 if (err != UBI_IO_BITFLIPS)
1030 ubi_warn("cannot read VID header back from PEB %d", to);
1031 goto out_unlock;
1032 }
1033
1034 if (data_size > 0) {
1035 err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
1036 if (err)
1037 goto out_unlock;
1038
1039 cond_resched();
1040
1041 /*
1042 * We've written the data and are going to read it back to make
1043 * sure it was written correctly.
1044 */
1045
1046 err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
1047 if (err) {
1048 if (err != UBI_IO_BITFLIPS)
1049 ubi_warn("cannot read data back from PEB %d",
1050 to);
1051 goto out_unlock;
1052 }
1053
1054 cond_resched();
1055
1056 if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
1057 ubi_warn("read data back from PEB %d - it is different",
1058 to);
1059 goto out_unlock;
1060 }
1061 }
1062
1063 ubi_assert(vol->eba_tbl[lnum] == from);
1064 vol->eba_tbl[lnum] = to;
1065
1066out_unlock:
1067 mutex_unlock(&ubi->buf_mutex);
1068 leb_write_unlock(ubi, vol_id, lnum);
1069 return err;
1070}
1071
1072/**
1073 * ubi_eba_init_scan - initialize the EBA unit using scanning information.
1074 * @ubi: UBI device description object
1075 * @si: scanning information
1076 *
1077 * This function returns zero in case of success and a negative error code in
1078 * case of failure.
1079 */
1080int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1081{
1082 int i, j, err, num_volumes;
1083 struct ubi_scan_volume *sv;
1084 struct ubi_volume *vol;
1085 struct ubi_scan_leb *seb;
1086 struct rb_node *rb;
1087
1088 dbg_eba("initialize EBA unit");
1089
1090 spin_lock_init(&ubi->ltree_lock);
1091 mutex_init(&ubi->alc_mutex);
1092 ubi->ltree = RB_ROOT;
1093
1094 ubi->global_sqnum = si->max_sqnum + 1;
1095 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1096
1097 for (i = 0; i < num_volumes; i++) {
1098 vol = ubi->volumes[i];
1099 if (!vol)
1100 continue;
1101
1102 cond_resched();
1103
1104 vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
1105 GFP_KERNEL);
1106 if (!vol->eba_tbl) {
1107 err = -ENOMEM;
1108 goto out_free;
1109 }
1110
1111 for (j = 0; j < vol->reserved_pebs; j++)
1112 vol->eba_tbl[j] = UBI_LEB_UNMAPPED;
1113
1114 sv = ubi_scan_find_sv(si, idx2vol_id(ubi, i));
1115 if (!sv)
1116 continue;
1117
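        /* Fill the EBA table of this volume from the scanning information */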
1118 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) {
1119 if (seb->lnum >= vol->reserved_pebs)
1120 /*
1121 * This may happen in case of an unclean reboot
1122 * during re-size.
1123 */
1124 ubi_scan_move_to_list(sv, seb, &si->erase);
1125 vol->eba_tbl[seb->lnum] = seb->pnum;
1126 }
1127 }
1128
1129 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1130 ubi_err("not enough physical eraseblocks (%d, need %d)",
1131 ubi->avail_pebs, EBA_RESERVED_PEBS);
1132 err = -ENOSPC;
1133 goto out_free;
1134 }
1135 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1136 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1137
1138 if (ubi->bad_allowed) {
1139 ubi_calculate_reserved(ubi);
1140
1141 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1142 /* Not enough free physical eraseblocks */
1143 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1144 ubi_warn("cannot reserve enough PEBs for bad PEB "
1145 "handling, reserved %d, need %d",
1146 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1147 } else
1148 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1149
1150 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1151 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1152 }
1153
1154 dbg_eba("EBA unit is initialized");
1155 return 0;
1156
1157out_free:
1158 for (i = 0; i < num_volumes; i++) {
1159 if (!ubi->volumes[i])
1160 continue;
1161 kfree(ubi->volumes[i]->eba_tbl);
1162 }
1163 return err;
1164}
1165
1166/**
1167 * ubi_eba_close - close EBA unit.
1168 * @ubi: UBI device description object
1169 */
1170void ubi_eba_close(const struct ubi_device *ubi)
1171{
1172 int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1173
1174 dbg_eba("close EBA unit");
1175
1176 for (i = 0; i < num_volumes; i++) {
1177 if (!ubi->volumes[i])
1178 continue;
1179 kfree(ubi->volumes[i]->eba_tbl);
1180 }
1181}