UBI: Fix stale pointers in ubi->lookuptbl
drivers/mtd/ubi/fastmap.c
1/*
2 * Copyright (c) 2012 Linutronix GmbH
3 * Author: Richard Weinberger <richard@nod.at>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
13 *
14 */
15
16#include <linux/crc32.h>
17#include "ubi.h"
18
19/**
20 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
21 * @ubi: UBI device description object
22 */
23size_t ubi_calc_fm_size(struct ubi_device *ubi)
24{
25 size_t size;
26
27 size = sizeof(struct ubi_fm_sb) + \
28 sizeof(struct ubi_fm_hdr) + \
29 sizeof(struct ubi_fm_scan_pool) + \
30 sizeof(struct ubi_fm_scan_pool) + \
31 (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
32 (sizeof(struct ubi_fm_eba) + \
33 (ubi->peb_count * sizeof(__be32))) + \
34 sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
35 return roundup(size, ubi->leb_size);
36}
37
38
39/**
40 * new_fm_vhdr - allocate a new volume header for fastmap usage.
41 * @ubi: UBI device description object
42 * @vol_id: the VID of the new header
43 *
44 * Returns a new struct ubi_vid_hdr on success.
45 * NULL indicates out of memory.
46 */
47static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
48{
49 struct ubi_vid_hdr *new;
50
51 new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
52 if (!new)
53 goto out;
54
55 new->vol_type = UBI_VID_DYNAMIC;
56 new->vol_id = cpu_to_be32(vol_id);
57
58 /* UBI implementations without fastmap support have to delete the
59 * fastmap.
60 */
61 new->compat = UBI_COMPAT_DELETE;
62
63out:
64 return new;
65}
66
67/**
 68 * add_aeb - create and add an attach erase block to a given list.
69 * @ai: UBI attach info object
70 * @list: the target list
71 * @pnum: PEB number of the new attach erase block
 72 * @ec: erase counter of the new LEB
73 * @scrub: scrub this PEB after attaching
74 *
75 * Returns 0 on success, < 0 indicates an internal error.
76 */
77static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
78 int pnum, int ec, int scrub)
79{
80 struct ubi_ainf_peb *aeb;
81
82 aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
83 if (!aeb)
84 return -ENOMEM;
85
86 aeb->pnum = pnum;
87 aeb->ec = ec;
88 aeb->lnum = -1;
89 aeb->scrub = scrub;
90 aeb->copy_flag = aeb->sqnum = 0;
91
92 ai->ec_sum += aeb->ec;
93 ai->ec_count++;
94
95 if (ai->max_ec < aeb->ec)
96 ai->max_ec = aeb->ec;
97
98 if (ai->min_ec > aeb->ec)
99 ai->min_ec = aeb->ec;
100
101 list_add_tail(&aeb->u.list, list);
102
103 return 0;
104}
105
106/**
107 * add_vol - create and add a new volume to ubi_attach_info.
108 * @ai: ubi_attach_info object
109 * @vol_id: VID of the new volume
 110 * @used_ebs: number of used EBs
111 * @data_pad: data padding value of the new volume
112 * @vol_type: volume type
113 * @last_eb_bytes: number of bytes in the last LEB
114 *
115 * Returns the new struct ubi_ainf_volume on success.
116 * NULL indicates an error.
117 */
118static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
119 int used_ebs, int data_pad, u8 vol_type,
120 int last_eb_bytes)
121{
122 struct ubi_ainf_volume *av;
123 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
124
125 while (*p) {
126 parent = *p;
127 av = rb_entry(parent, struct ubi_ainf_volume, rb);
128
 129 if (vol_id > av->vol_id)
 130 p = &(*p)->rb_left;
 131 else
132 p = &(*p)->rb_right;
133 }
134
135 av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
136 if (!av)
137 goto out;
138
 139 av->highest_lnum = av->leb_count = av->used_ebs = 0;
 140 av->vol_id = vol_id;
141 av->data_pad = data_pad;
142 av->last_data_size = last_eb_bytes;
143 av->compat = 0;
144 av->vol_type = vol_type;
145 av->root = RB_ROOT;
146 if (av->vol_type == UBI_STATIC_VOLUME)
147 av->used_ebs = used_ebs;
148
149 dbg_bld("found volume (ID %i)", vol_id);
150
151 rb_link_node(&av->rb, parent, p);
152 rb_insert_color(&av->rb, &ai->volumes);
153
154out:
155 return av;
156}
157
158/**
159 * assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
 160 * from its original list.
161 * @ai: ubi_attach_info object
162 * @aeb: the to be assigned SEB
163 * @av: target scan volume
164 */
165static void assign_aeb_to_av(struct ubi_attach_info *ai,
166 struct ubi_ainf_peb *aeb,
167 struct ubi_ainf_volume *av)
168{
169 struct ubi_ainf_peb *tmp_aeb;
170 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
171
172 p = &av->root.rb_node;
173 while (*p) {
174 parent = *p;
175
176 tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
177 if (aeb->lnum != tmp_aeb->lnum) {
178 if (aeb->lnum < tmp_aeb->lnum)
179 p = &(*p)->rb_left;
180 else
181 p = &(*p)->rb_right;
182
183 continue;
184 } else
185 break;
186 }
187
188 list_del(&aeb->u.list);
189 av->leb_count++;
190
191 rb_link_node(&aeb->u.rb, parent, p);
192 rb_insert_color(&aeb->u.rb, &av->root);
193}
194
195/**
 196 * update_vol - inserts or updates a LEB which was found in a pool.
197 * @ubi: the UBI device object
198 * @ai: attach info object
199 * @av: the volume this LEB belongs to
200 * @new_vh: the volume header derived from new_aeb
201 * @new_aeb: the AEB to be examined
202 *
203 * Returns 0 on success, < 0 indicates an internal error.
204 */
205static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
206 struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
207 struct ubi_ainf_peb *new_aeb)
208{
209 struct rb_node **p = &av->root.rb_node, *parent = NULL;
210 struct ubi_ainf_peb *aeb, *victim;
211 int cmp_res;
212
213 while (*p) {
214 parent = *p;
215 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
216
217 if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
218 if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
219 p = &(*p)->rb_left;
220 else
221 p = &(*p)->rb_right;
222
223 continue;
224 }
225
226 /* This case can happen if the fastmap gets written
227 * because of a volume change (creation, deletion, ..).
228 * Then a PEB can be within the persistent EBA and the pool.
229 */
230 if (aeb->pnum == new_aeb->pnum) {
231 ubi_assert(aeb->lnum == new_aeb->lnum);
232 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
233
234 return 0;
235 }
236
237 cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
238 if (cmp_res < 0)
239 return cmp_res;
240
241 /* new_aeb is newer */
242 if (cmp_res & 1) {
243 victim = kmem_cache_alloc(ai->aeb_slab_cache,
244 GFP_KERNEL);
245 if (!victim)
246 return -ENOMEM;
247
248 victim->ec = aeb->ec;
249 victim->pnum = aeb->pnum;
250 list_add_tail(&victim->u.list, &ai->erase);
251
252 if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
253 av->last_data_size = \
254 be32_to_cpu(new_vh->data_size);
255
256 dbg_bld("vol %i: AEB %i's PEB %i is the newer",
257 av->vol_id, aeb->lnum, new_aeb->pnum);
258
259 aeb->ec = new_aeb->ec;
260 aeb->pnum = new_aeb->pnum;
261 aeb->copy_flag = new_vh->copy_flag;
262 aeb->scrub = new_aeb->scrub;
263 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
264
265 /* new_aeb is older */
266 } else {
267 dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
268 av->vol_id, aeb->lnum, new_aeb->pnum);
269 list_add_tail(&new_aeb->u.list, &ai->erase);
270 }
271
272 return 0;
273 }
274 /* This LEB is new, let's add it to the volume */
275
276 if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
277 av->highest_lnum = be32_to_cpu(new_vh->lnum);
278 av->last_data_size = be32_to_cpu(new_vh->data_size);
279 }
280
281 if (av->vol_type == UBI_STATIC_VOLUME)
282 av->used_ebs = be32_to_cpu(new_vh->used_ebs);
283
284 av->leb_count++;
285
286 rb_link_node(&new_aeb->u.rb, parent, p);
287 rb_insert_color(&new_aeb->u.rb, &av->root);
288
289 return 0;
290}
291
292/**
293 * process_pool_aeb - we found a non-empty PEB in a pool.
294 * @ubi: UBI device object
295 * @ai: attach info object
296 * @new_vh: the volume header derived from new_aeb
297 * @new_aeb: the AEB to be examined
298 *
299 * Returns 0 on success, < 0 indicates an internal error.
300 */
301static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
302 struct ubi_vid_hdr *new_vh,
303 struct ubi_ainf_peb *new_aeb)
304{
305 struct ubi_ainf_volume *av, *tmp_av = NULL;
306 struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
307 int found = 0;
308
309 if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
310 be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
311 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
312
313 return 0;
314 }
315
316 /* Find the volume this SEB belongs to */
317 while (*p) {
318 parent = *p;
319 tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
320
321 if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
322 p = &(*p)->rb_left;
323 else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
324 p = &(*p)->rb_right;
325 else {
326 found = 1;
327 break;
328 }
329 }
330
331 if (found)
332 av = tmp_av;
333 else {
 334 ubi_err(ubi, "orphaned volume in fastmap pool!");
 335 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
336 return UBI_BAD_FASTMAP;
337 }
338
339 ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
340
341 return update_vol(ubi, ai, av, new_vh, new_aeb);
342}
343
344/**
345 * unmap_peb - unmap a PEB.
346 * If fastmap detects a free PEB in the pool it has to check whether
347 * this PEB has been unmapped after writing the fastmap.
348 *
349 * @ai: UBI attach info object
350 * @pnum: The PEB to be unmapped
351 */
352static void unmap_peb(struct ubi_attach_info *ai, int pnum)
353{
354 struct ubi_ainf_volume *av;
355 struct rb_node *node, *node2;
356 struct ubi_ainf_peb *aeb;
357
358 for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
359 av = rb_entry(node, struct ubi_ainf_volume, rb);
360
361 for (node2 = rb_first(&av->root); node2;
362 node2 = rb_next(node2)) {
363 aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
364 if (aeb->pnum == pnum) {
365 rb_erase(&aeb->u.rb, &av->root);
 366 av->leb_count--;
367 kmem_cache_free(ai->aeb_slab_cache, aeb);
368 return;
369 }
370 }
371 }
372}
373
374/**
 375 * scan_pool - scans a pool for changed (no longer empty) PEBs.
376 * @ubi: UBI device object
377 * @ai: attach info object
378 * @pebs: an array of all PEB numbers in the to be scanned pool
379 * @pool_size: size of the pool (number of entries in @pebs)
380 * @max_sqnum: pointer to the maximal sequence number
381 * @free: list of PEBs which are most likely free (and go into @ai->free)
382 *
383 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
384 * < 0 indicates an internal error.
385 */
386static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
387 int *pebs, int pool_size, unsigned long long *max_sqnum,
 388 struct list_head *free)
389{
390 struct ubi_vid_hdr *vh;
391 struct ubi_ec_hdr *ech;
392 struct ubi_ainf_peb *new_aeb;
393 int i, pnum, err, ret = 0;
394
395 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
396 if (!ech)
397 return -ENOMEM;
398
399 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
400 if (!vh) {
401 kfree(ech);
402 return -ENOMEM;
403 }
404
405 dbg_bld("scanning fastmap pool: size = %i", pool_size);
406
407 /*
408 * Now scan all PEBs in the pool to find changes which have been made
409 * after the creation of the fastmap
410 */
411 for (i = 0; i < pool_size; i++) {
412 int scrub = 0;
 413 int image_seq;
414
415 pnum = be32_to_cpu(pebs[i]);
416
417 if (ubi_io_is_bad(ubi, pnum)) {
 418 ubi_err(ubi, "bad PEB in fastmap pool!");
419 ret = UBI_BAD_FASTMAP;
420 goto out;
421 }
422
423 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
424 if (err && err != UBI_IO_BITFLIPS) {
 425 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
426 pnum, err);
427 ret = err > 0 ? UBI_BAD_FASTMAP : err;
428 goto out;
 429 } else if (err == UBI_IO_BITFLIPS)
430 scrub = 1;
431
432 /*
433 * Older UBI implementations have image_seq set to zero, so
434 * we shouldn't fail if image_seq == 0.
435 */
436 image_seq = be32_to_cpu(ech->image_seq);
437
438 if (image_seq && (image_seq != ubi->image_seq)) {
 439 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
 440 be32_to_cpu(ech->image_seq), ubi->image_seq);
 441 ret = UBI_BAD_FASTMAP;
442 goto out;
443 }
444
445 err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
446 if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
447 unsigned long long ec = be64_to_cpu(ech->ec);
448 unmap_peb(ai, pnum);
449 dbg_bld("Adding PEB to free: %i", pnum);
450 if (err == UBI_IO_FF_BITFLIPS)
451 add_aeb(ai, free, pnum, ec, 1);
452 else
453 add_aeb(ai, free, pnum, ec, 0);
454 continue;
455 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
456 dbg_bld("Found non empty PEB:%i in pool", pnum);
457
458 if (err == UBI_IO_BITFLIPS)
459 scrub = 1;
460
461 new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
462 GFP_KERNEL);
463 if (!new_aeb) {
464 ret = -ENOMEM;
465 goto out;
466 }
467
468 new_aeb->ec = be64_to_cpu(ech->ec);
469 new_aeb->pnum = pnum;
470 new_aeb->lnum = be32_to_cpu(vh->lnum);
471 new_aeb->sqnum = be64_to_cpu(vh->sqnum);
472 new_aeb->copy_flag = vh->copy_flag;
473 new_aeb->scrub = scrub;
474
475 if (*max_sqnum < new_aeb->sqnum)
476 *max_sqnum = new_aeb->sqnum;
477
478 err = process_pool_aeb(ubi, ai, vh, new_aeb);
479 if (err) {
480 ret = err > 0 ? UBI_BAD_FASTMAP : err;
481 goto out;
482 }
483 } else {
484 /* We are paranoid and fall back to scanning mode */
 485 ubi_err(ubi, "fastmap pool PEBs contains damaged PEBs!");
486 ret = err > 0 ? UBI_BAD_FASTMAP : err;
487 goto out;
488 }
489
490 }
491
492out:
493 ubi_free_vid_hdr(ubi, vh);
494 kfree(ech);
495 return ret;
496}
497
498/**
499 * count_fastmap_pebs - Counts the PEBs found by fastmap.
500 * @ai: The UBI attach info object
501 */
502static int count_fastmap_pebs(struct ubi_attach_info *ai)
503{
504 struct ubi_ainf_peb *aeb;
505 struct ubi_ainf_volume *av;
506 struct rb_node *rb1, *rb2;
507 int n = 0;
508
509 list_for_each_entry(aeb, &ai->erase, u.list)
510 n++;
511
512 list_for_each_entry(aeb, &ai->free, u.list)
513 n++;
514
515 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
516 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
517 n++;
518
519 return n;
520}
521
522/**
523 * ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
524 * @ubi: UBI device object
525 * @ai: UBI attach info object
526 * @fm: the fastmap to be attached
527 *
528 * Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
529 * < 0 indicates an internal error.
530 */
531static int ubi_attach_fastmap(struct ubi_device *ubi,
532 struct ubi_attach_info *ai,
533 struct ubi_fastmap_layout *fm)
534{
 535 struct list_head used, free;
536 struct ubi_ainf_volume *av;
537 struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
538 struct ubi_fm_sb *fmsb;
539 struct ubi_fm_hdr *fmhdr;
540 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
541 struct ubi_fm_ec *fmec;
542 struct ubi_fm_volhdr *fmvhdr;
543 struct ubi_fm_eba *fm_eba;
544 int ret, i, j, pool_size, wl_pool_size;
545 size_t fm_pos = 0, fm_size = ubi->fm_size;
546 unsigned long long max_sqnum = 0;
547 void *fm_raw = ubi->fm_buf;
548
549 INIT_LIST_HEAD(&used);
550 INIT_LIST_HEAD(&free);
551 ai->min_ec = UBI_MAX_ERASECOUNTER;
552
553 fmsb = (struct ubi_fm_sb *)(fm_raw);
554 ai->max_sqnum = fmsb->sqnum;
555 fm_pos += sizeof(struct ubi_fm_sb);
556 if (fm_pos >= fm_size)
557 goto fail_bad;
558
559 fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
560 fm_pos += sizeof(*fmhdr);
561 if (fm_pos >= fm_size)
562 goto fail_bad;
563
564 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
 565 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
566 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
567 goto fail_bad;
568 }
569
570 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
571 fm_pos += sizeof(*fmpl1);
572 if (fm_pos >= fm_size)
573 goto fail_bad;
574 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
 575 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
576 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
577 goto fail_bad;
578 }
579
580 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
581 fm_pos += sizeof(*fmpl2);
582 if (fm_pos >= fm_size)
583 goto fail_bad;
584 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
 585 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
586 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
587 goto fail_bad;
588 }
589
590 pool_size = be16_to_cpu(fmpl1->size);
591 wl_pool_size = be16_to_cpu(fmpl2->size);
592 fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
593 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
594
595 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
 596 ubi_err(ubi, "bad pool size: %i", pool_size);
597 goto fail_bad;
598 }
599
600 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
 601 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
602 goto fail_bad;
603 }
604
605
606 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
607 fm->max_pool_size < 0) {
 608 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
609 goto fail_bad;
610 }
611
612 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
613 fm->max_wl_pool_size < 0) {
614 ubi_err(ubi, "bad maximal WL pool size: %i",
615 fm->max_wl_pool_size);
616 goto fail_bad;
617 }
618
619 /* read EC values from free list */
620 for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
621 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
622 fm_pos += sizeof(*fmec);
623 if (fm_pos >= fm_size)
624 goto fail_bad;
625
626 add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
627 be32_to_cpu(fmec->ec), 0);
628 }
629
630 /* read EC values from used list */
631 for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
632 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
633 fm_pos += sizeof(*fmec);
634 if (fm_pos >= fm_size)
635 goto fail_bad;
636
637 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
638 be32_to_cpu(fmec->ec), 0);
639 }
640
641 /* read EC values from scrub list */
642 for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
643 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
644 fm_pos += sizeof(*fmec);
645 if (fm_pos >= fm_size)
646 goto fail_bad;
647
648 add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
649 be32_to_cpu(fmec->ec), 1);
650 }
651
652 /* read EC values from erase list */
653 for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
654 fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
655 fm_pos += sizeof(*fmec);
656 if (fm_pos >= fm_size)
657 goto fail_bad;
658
659 add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
660 be32_to_cpu(fmec->ec), 1);
661 }
662
663 ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
664 ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
665
666 /* Iterate over all volumes and read their EBA table */
667 for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
668 fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
669 fm_pos += sizeof(*fmvhdr);
670 if (fm_pos >= fm_size)
671 goto fail_bad;
672
673 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
 674 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
675 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
676 goto fail_bad;
677 }
678
679 av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
680 be32_to_cpu(fmvhdr->used_ebs),
681 be32_to_cpu(fmvhdr->data_pad),
682 fmvhdr->vol_type,
683 be32_to_cpu(fmvhdr->last_eb_bytes));
684
685 if (!av)
686 goto fail_bad;
687
688 ai->vols_found++;
689 if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
690 ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
691
692 fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
693 fm_pos += sizeof(*fm_eba);
694 fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
695 if (fm_pos >= fm_size)
696 goto fail_bad;
697
698 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
 699 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
700 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
701 goto fail_bad;
702 }
703
704 for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
705 int pnum = be32_to_cpu(fm_eba->pnum[j]);
706
707 if ((int)be32_to_cpu(fm_eba->pnum[j]) < 0)
708 continue;
709
710 aeb = NULL;
711 list_for_each_entry(tmp_aeb, &used, u.list) {
 712 if (tmp_aeb->pnum == pnum) {
 713 aeb = tmp_aeb;
714 break;
715 }
716 }
717
 718 if (!aeb) {
719 ubi_err(ubi, "PEB %i is in EBA but not in used list", pnum);
720 goto fail_bad;
721 }
722
723 aeb->lnum = j;
724
725 if (av->highest_lnum <= aeb->lnum)
726 av->highest_lnum = aeb->lnum;
727
728 assign_aeb_to_av(ai, aeb, av);
729
730 dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
731 aeb->pnum, aeb->lnum, av->vol_id);
732 }
733 }
734
 735 ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum, &free);
736 if (ret)
737 goto fail;
738
 739 ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum, &free);
740 if (ret)
741 goto fail;
742
743 if (max_sqnum > ai->max_sqnum)
744 ai->max_sqnum = max_sqnum;
745
746 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list)
747 list_move_tail(&tmp_aeb->u.list, &ai->free);
 748
749 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list)
750 list_move_tail(&tmp_aeb->u.list, &ai->erase);
751
752 ubi_assert(list_empty(&free));
753
754 /*
755 * If fastmap is leaking PEBs (must not happen), raise a
756 * fat warning and fall back to scanning mode.
757 * We do this here because in ubi_wl_init() it's too late
758 * and we cannot fall back to scanning.
759 */
760 if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
761 ai->bad_peb_count - fm->used_blocks))
762 goto fail_bad;
763
764 return 0;
765
766fail_bad:
767 ret = UBI_BAD_FASTMAP;
768fail:
 769 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
 770 list_del(&tmp_aeb->u.list);
 771 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
 772 }
 773 list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
 774 list_del(&tmp_aeb->u.list);
 775 kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
776 }
777
778 return ret;
779}
780
781/**
782 * ubi_scan_fastmap - scan the fastmap.
783 * @ubi: UBI device object
784 * @ai: UBI attach info to be filled
785 * @fm_anchor: The fastmap starts at this PEB
786 *
787 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
788 * UBI_BAD_FASTMAP if one was found but is not usable.
789 * < 0 indicates an internal error.
790 */
791int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
792 int fm_anchor)
793{
794 struct ubi_fm_sb *fmsb, *fmsb2;
795 struct ubi_vid_hdr *vh;
796 struct ubi_ec_hdr *ech;
797 struct ubi_fastmap_layout *fm;
798 int i, used_blocks, pnum, ret = 0;
799 size_t fm_size;
800 __be32 crc, tmp_crc;
801 unsigned long long sqnum = 0;
802
 803 down_write(&ubi->fm_protect);
804 memset(ubi->fm_buf, 0, ubi->fm_size);
805
806 fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
807 if (!fmsb) {
808 ret = -ENOMEM;
809 goto out;
810 }
811
812 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
813 if (!fm) {
814 ret = -ENOMEM;
815 kfree(fmsb);
816 goto out;
817 }
818
819 ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
820 if (ret && ret != UBI_IO_BITFLIPS)
821 goto free_fm_sb;
822 else if (ret == UBI_IO_BITFLIPS)
823 fm->to_be_tortured[0] = 1;
824
825 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
 826 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
827 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
828 ret = UBI_BAD_FASTMAP;
829 goto free_fm_sb;
830 }
831
832 if (fmsb->version != UBI_FM_FMT_VERSION) {
 833 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
834 fmsb->version, UBI_FM_FMT_VERSION);
835 ret = UBI_BAD_FASTMAP;
836 goto free_fm_sb;
837 }
838
839 used_blocks = be32_to_cpu(fmsb->used_blocks);
840 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
841 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
842 used_blocks);
843 ret = UBI_BAD_FASTMAP;
844 goto free_fm_sb;
845 }
846
847 fm_size = ubi->leb_size * used_blocks;
848 if (fm_size != ubi->fm_size) {
849 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
850 fm_size, ubi->fm_size);
851 ret = UBI_BAD_FASTMAP;
852 goto free_fm_sb;
853 }
854
855 ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
856 if (!ech) {
857 ret = -ENOMEM;
858 goto free_fm_sb;
859 }
860
861 vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
862 if (!vh) {
863 ret = -ENOMEM;
864 goto free_hdr;
865 }
866
867 for (i = 0; i < used_blocks; i++) {
868 int image_seq;
869
870 pnum = be32_to_cpu(fmsb->block_loc[i]);
871
872 if (ubi_io_is_bad(ubi, pnum)) {
873 ret = UBI_BAD_FASTMAP;
874 goto free_hdr;
875 }
876
877 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
878 if (ret && ret != UBI_IO_BITFLIPS) {
 879 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
880 i, pnum);
881 if (ret > 0)
882 ret = UBI_BAD_FASTMAP;
883 goto free_hdr;
884 } else if (ret == UBI_IO_BITFLIPS)
885 fm->to_be_tortured[i] = 1;
886
 887 image_seq = be32_to_cpu(ech->image_seq);
 888 if (!ubi->image_seq)
 889 ubi->image_seq = image_seq;
 890
891 /*
892 * Older UBI implementations have image_seq set to zero, so
893 * we shouldn't fail if image_seq == 0.
894 */
895 if (image_seq && (image_seq != ubi->image_seq)) {
 896 ubi_err(ubi, "wrong image seq:%d instead of %d",
 897 be32_to_cpu(ech->image_seq), ubi->image_seq);
898 ret = UBI_BAD_FASTMAP;
899 goto free_hdr;
900 }
901
902 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
903 if (ret && ret != UBI_IO_BITFLIPS) {
 904 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
905 i, pnum);
906 goto free_hdr;
907 }
908
909 if (i == 0) {
910 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
 911 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
912 be32_to_cpu(vh->vol_id),
913 UBI_FM_SB_VOLUME_ID);
914 ret = UBI_BAD_FASTMAP;
915 goto free_hdr;
916 }
917 } else {
918 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
 919 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
920 be32_to_cpu(vh->vol_id),
921 UBI_FM_DATA_VOLUME_ID);
922 ret = UBI_BAD_FASTMAP;
923 goto free_hdr;
924 }
925 }
926
927 if (sqnum < be64_to_cpu(vh->sqnum))
928 sqnum = be64_to_cpu(vh->sqnum);
929
930 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
931 ubi->leb_start, ubi->leb_size);
932 if (ret && ret != UBI_IO_BITFLIPS) {
 933 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
934 "err: %i)", i, pnum, ret);
935 goto free_hdr;
936 }
937 }
938
939 kfree(fmsb);
940 fmsb = NULL;
941
942 fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
943 tmp_crc = be32_to_cpu(fmsb2->data_crc);
944 fmsb2->data_crc = 0;
945 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
946 if (crc != tmp_crc) {
947 ubi_err(ubi, "fastmap data CRC is invalid");
948 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
949 tmp_crc, crc);
950 ret = UBI_BAD_FASTMAP;
951 goto free_hdr;
952 }
953
954 fmsb2->sqnum = sqnum;
955
956 fm->used_blocks = used_blocks;
957
958 ret = ubi_attach_fastmap(ubi, ai, fm);
959 if (ret) {
960 if (ret > 0)
961 ret = UBI_BAD_FASTMAP;
962 goto free_hdr;
963 }
964
965 for (i = 0; i < used_blocks; i++) {
966 struct ubi_wl_entry *e;
967
968 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
969 if (!e) {
970 while (i--)
971 kfree(fm->e[i]);
972
973 ret = -ENOMEM;
974 goto free_hdr;
975 }
976
977 e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
978 e->ec = be32_to_cpu(fmsb2->block_ec[i]);
979 fm->e[i] = e;
980 }
981
982 ubi->fm = fm;
983 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
984 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
985 ubi_msg(ubi, "attached by fastmap");
986 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
987 ubi_msg(ubi, "fastmap WL pool size: %d",
988 ubi->fm_wl_pool.max_size);
989 ubi->fm_disabled = 0;
990
991 ubi_free_vid_hdr(ubi, vh);
992 kfree(ech);
993out:
 994 up_write(&ubi->fm_protect);
 995 if (ret == UBI_BAD_FASTMAP)
 996 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
997 return ret;
998
999free_hdr:
1000 ubi_free_vid_hdr(ubi, vh);
1001 kfree(ech);
1002free_fm_sb:
1003 kfree(fmsb);
1004 kfree(fm);
1005 goto out;
1006}
1007
1008/**
1009 * ubi_write_fastmap - writes a fastmap.
1010 * @ubi: UBI device object
1011 * @new_fm: the to be written fastmap
1012 *
1013 * Returns 0 on success, < 0 indicates an internal error.
1014 */
1015static int ubi_write_fastmap(struct ubi_device *ubi,
1016 struct ubi_fastmap_layout *new_fm)
1017{
1018 size_t fm_pos = 0;
1019 void *fm_raw;
1020 struct ubi_fm_sb *fmsb;
1021 struct ubi_fm_hdr *fmh;
1022 struct ubi_fm_scan_pool *fmpl1, *fmpl2;
1023 struct ubi_fm_ec *fec;
1024 struct ubi_fm_volhdr *fvh;
1025 struct ubi_fm_eba *feba;
1026 struct rb_node *node;
1027 struct ubi_wl_entry *wl_e;
1028 struct ubi_volume *vol;
1029 struct ubi_vid_hdr *avhdr, *dvhdr;
1030 struct ubi_work *ubi_wrk;
1031 int ret, i, j, free_peb_count, used_peb_count, vol_count;
1032 int scrub_peb_count, erase_peb_count;
1033
1034 fm_raw = ubi->fm_buf;
1035 memset(ubi->fm_buf, 0, ubi->fm_size);
1036
1037 avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1038 if (!avhdr) {
1039 ret = -ENOMEM;
1040 goto out;
1041 }
1042
1043 dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
1044 if (!dvhdr) {
1045 ret = -ENOMEM;
1046 goto out_kfree;
1047 }
1048
1049 spin_lock(&ubi->volumes_lock);
1050 spin_lock(&ubi->wl_lock);
1051
1052 fmsb = (struct ubi_fm_sb *)fm_raw;
1053 fm_pos += sizeof(*fmsb);
1054 ubi_assert(fm_pos <= ubi->fm_size);
1055
1056 fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
1057 fm_pos += sizeof(*fmh);
1058 ubi_assert(fm_pos <= ubi->fm_size);
1059
1060 fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
1061 fmsb->version = UBI_FM_FMT_VERSION;
1062 fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
1063 /* the max sqnum will be filled in while *reading* the fastmap */
1064 fmsb->sqnum = 0;
1065
1066 fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
1067 free_peb_count = 0;
1068 used_peb_count = 0;
1069 scrub_peb_count = 0;
1070 erase_peb_count = 0;
1071 vol_count = 0;
1072
1073 fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1074 fm_pos += sizeof(*fmpl1);
1075 fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1076 fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
1077 fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
1078
1079 for (i = 0; i < ubi->fm_pool.size; i++)
1080 fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
1081
1082 fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
1083 fm_pos += sizeof(*fmpl2);
1084 fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
1085 fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
1086 fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
1087
1088 for (i = 0; i < ubi->fm_wl_pool.size; i++)
1089 fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
1090
1091 for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
1092 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1093 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1094
1095 fec->pnum = cpu_to_be32(wl_e->pnum);
1096 fec->ec = cpu_to_be32(wl_e->ec);
1097
1098 free_peb_count++;
1099 fm_pos += sizeof(*fec);
1100 ubi_assert(fm_pos <= ubi->fm_size);
1101 }
1102 fmh->free_peb_count = cpu_to_be32(free_peb_count);
1103
1104 for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
1105 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1106 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1107
1108 fec->pnum = cpu_to_be32(wl_e->pnum);
1109 fec->ec = cpu_to_be32(wl_e->ec);
1110
1111 used_peb_count++;
1112 fm_pos += sizeof(*fec);
1113 ubi_assert(fm_pos <= ubi->fm_size);
1114 }
1115
1116 for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
1117 list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
1118 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1119
1120 fec->pnum = cpu_to_be32(wl_e->pnum);
1121 fec->ec = cpu_to_be32(wl_e->ec);
1122
1123 used_peb_count++;
1124 fm_pos += sizeof(*fec);
1125 ubi_assert(fm_pos <= ubi->fm_size);
1126 }
1127 }
1128 fmh->used_peb_count = cpu_to_be32(used_peb_count);
1129
1130 for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
1131 wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
1132 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1133
1134 fec->pnum = cpu_to_be32(wl_e->pnum);
1135 fec->ec = cpu_to_be32(wl_e->ec);
1136
1137 scrub_peb_count++;
1138 fm_pos += sizeof(*fec);
1139 ubi_assert(fm_pos <= ubi->fm_size);
1140 }
1141 fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
1142
1143
1144 list_for_each_entry(ubi_wrk, &ubi->works, list) {
1145 if (ubi_is_erase_work(ubi_wrk)) {
1146 wl_e = ubi_wrk->e;
1147 ubi_assert(wl_e);
1148
1149 fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
1150
1151 fec->pnum = cpu_to_be32(wl_e->pnum);
1152 fec->ec = cpu_to_be32(wl_e->ec);
1153
1154 erase_peb_count++;
1155 fm_pos += sizeof(*fec);
1156 ubi_assert(fm_pos <= ubi->fm_size);
1157 }
1158 }
1159 fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
1160
1161 for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
1162 vol = ubi->volumes[i];
1163
1164 if (!vol)
1165 continue;
1166
1167 vol_count++;
1168
1169 fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
1170 fm_pos += sizeof(*fvh);
1171 ubi_assert(fm_pos <= ubi->fm_size);
1172
1173 fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
1174 fvh->vol_id = cpu_to_be32(vol->vol_id);
1175 fvh->vol_type = vol->vol_type;
1176 fvh->used_ebs = cpu_to_be32(vol->used_ebs);
1177 fvh->data_pad = cpu_to_be32(vol->data_pad);
1178 fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
1179
1180 ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
1181 vol->vol_type == UBI_STATIC_VOLUME);
1182
1183 feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
1184 fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
1185 ubi_assert(fm_pos <= ubi->fm_size);
1186
1187 for (j = 0; j < vol->reserved_pebs; j++)
1188 feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
1189
1190 feba->reserved_pebs = cpu_to_be32(j);
1191 feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
1192 }
1193 fmh->vol_count = cpu_to_be32(vol_count);
1194 fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
1195
1196 avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1197 avhdr->lnum = 0;
1198
1199 spin_unlock(&ubi->wl_lock);
1200 spin_unlock(&ubi->volumes_lock);
1201
1202 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1203 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1204 if (ret) {
 1205 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1206 goto out_kfree;
1207 }
1208
1209 for (i = 0; i < new_fm->used_blocks; i++) {
1210 fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
1211 fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
1212 }
1213
1214 fmsb->data_crc = 0;
1215 fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
1216 ubi->fm_size));
1217
1218 for (i = 1; i < new_fm->used_blocks; i++) {
1219 dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1220 dvhdr->lnum = cpu_to_be32(i);
1221 dbg_bld("writing fastmap data to PEB %i sqnum %llu",
1222 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1223 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1224 if (ret) {
 1225 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1226 new_fm->e[i]->pnum);
1227 goto out_kfree;
1228 }
1229 }
1230
1231 for (i = 0; i < new_fm->used_blocks; i++) {
1232 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1233 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1234 if (ret) {
 1235 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1236 new_fm->e[i]->pnum);
1237 goto out_kfree;
1238 }
1239 }
1240
1241 ubi_assert(new_fm);
1242 ubi->fm = new_fm;
1243
1244 dbg_bld("fastmap written!");
1245
1246out_kfree:
1247 ubi_free_vid_hdr(ubi, avhdr);
1248 ubi_free_vid_hdr(ubi, dvhdr);
1249out:
1250 return ret;
1251}
1252
1253/**
1254 * erase_block - Manually erase a PEB.
1255 * @ubi: UBI device object
1256 * @pnum: PEB to be erased
1257 *
1258 * Returns the new EC value on success, < 0 indicates an internal error.
1259 */
1260static int erase_block(struct ubi_device *ubi, int pnum)
1261{
1262 int ret;
1263 struct ubi_ec_hdr *ec_hdr;
1264 long long ec;
1265
1266 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
1267 if (!ec_hdr)
1268 return -ENOMEM;
1269
1270 ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1271 if (ret < 0)
1272 goto out;
1273 else if (ret && ret != UBI_IO_BITFLIPS) {
1274 ret = -EINVAL;
1275 goto out;
1276 }
1277
1278 ret = ubi_io_sync_erase(ubi, pnum, 0);
1279 if (ret < 0)
1280 goto out;
1281
1282 ec = be64_to_cpu(ec_hdr->ec);
1283 ec += ret;
1284 if (ec > UBI_MAX_ERASECOUNTER) {
1285 ret = -EINVAL;
1286 goto out;
1287 }
1288
1289 ec_hdr->ec = cpu_to_be64(ec);
1290 ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
1291 if (ret < 0)
1292 goto out;
1293
1294 ret = ec;
1295out:
1296 kfree(ec_hdr);
1297 return ret;
1298}
1299
1300/**
1301 * invalidate_fastmap - destroys a fastmap.
1302 * @ubi: UBI device object
 1303 *
 1304 * This function ensures that upon next UBI attach a full scan
 1305 * is issued. We need this if UBI is about to write a new fastmap
 1306 * but is unable to do so. In this case we have two options:
 1307 * a) Make sure that the current fastmap will not be used upon
 1308 * attach time and continue or b) fall back to RO mode to have the
 1309 * current fastmap in a valid state.
1310 * Returns 0 on success, < 0 indicates an internal error.
1311 */
 1312static int invalidate_fastmap(struct ubi_device *ubi)
 1313{
 1314 int ret;
1315 struct ubi_fastmap_layout *fm;
1316 struct ubi_wl_entry *e;
1317 struct ubi_vid_hdr *vh = NULL;
 1318
1319 if (!ubi->fm)
1320 return 0;
1321
1322 ubi->fm = NULL;
1323
1324 ret = -ENOMEM;
1325 fm = kzalloc(sizeof(*fm), GFP_KERNEL);
1326 if (!fm)
1327 goto out;
1328
1329 vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
1330 if (!vh)
1331 goto out_free_fm;
1332
1333 ret = -ENOSPC;
1334 e = ubi_wl_get_fm_peb(ubi, 1);
1335 if (!e)
1336 goto out_free_fm;
 1337
1338 /*
1339 * Create fake fastmap such that UBI will fall back
1340 * to scanning mode.
1341 */
 1342 vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1343 ret = ubi_io_write_vid_hdr(ubi, e->pnum, vh);
1344 if (ret < 0) {
1345 ubi_wl_put_fm_peb(ubi, e, 0, 0);
1346 goto out_free_fm;
1347 }
 1348
1349 fm->used_blocks = 1;
1350 fm->e[0] = e;
1351
1352 ubi->fm = fm;
1353
1354out:
1355 ubi_free_vid_hdr(ubi, vh);
 1356 return ret;
1357
1358out_free_fm:
1359 kfree(fm);
1360 goto out;
1361}
1362
1363/**
1364 * return_fm_pebs - returns all PEBs used by a fastmap back to the
1365 * WL sub-system.
1366 * @ubi: UBI device object
1367 * @fm: fastmap layout object
1368 */
1369static void return_fm_pebs(struct ubi_device *ubi,
1370 struct ubi_fastmap_layout *fm)
1371{
1372 int i;
1373
1374 if (!fm)
1375 return;
1376
1377 for (i = 0; i < fm->used_blocks; i++) {
1378 if (fm->e[i]) {
1379 ubi_wl_put_fm_peb(ubi, fm->e[i], i,
1380 fm->to_be_tortured[i]);
1381 fm->e[i] = NULL;
1382 }
1383 }
1384}
1385
1386/**
1387 * ubi_update_fastmap - will be called by UBI if a volume changes or
1388 * a fastmap pool becomes full.
1389 * @ubi: UBI device object
1390 *
1391 * Returns 0 on success, < 0 indicates an internal error.
1392 */
1393int ubi_update_fastmap(struct ubi_device *ubi)
1394{
 1395 int ret, i, j;
1396 struct ubi_fastmap_layout *new_fm, *old_fm;
1397 struct ubi_wl_entry *tmp_e;
1398
 1399 down_write(&ubi->fm_protect);
1400
1401 ubi_refill_pools(ubi);
1402
1403 if (ubi->ro_mode || ubi->fm_disabled) {
 1404 up_write(&ubi->fm_protect);
1405 return 0;
1406 }
1407
1408 ret = ubi_ensure_anchor_pebs(ubi);
1409 if (ret) {
 1410 up_write(&ubi->fm_protect);
1411 return ret;
1412 }
1413
1414 new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
1415 if (!new_fm) {
 1416 up_write(&ubi->fm_protect);
1417 return -ENOMEM;
1418 }
1419
1420 new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
1421 old_fm = ubi->fm;
1422 ubi->fm = NULL;
1423
1424 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
 1425 ubi_err(ubi, "fastmap too large");
1426 ret = -ENOSPC;
1427 goto err;
1428 }
1429
1430 for (i = 1; i < new_fm->used_blocks; i++) {
1431 spin_lock(&ubi->wl_lock);
1432 tmp_e = ubi_wl_get_fm_peb(ubi, 0);
1433 spin_unlock(&ubi->wl_lock);
1434
1435 if (!tmp_e) {
1436 if (old_fm && old_fm->e[i]) {
1437 ret = erase_block(ubi, old_fm->e[i]->pnum);
1438 if (ret < 0) {
1439 ubi_err(ubi, "could not erase old fastmap PEB");
1440
1441 for (j = 1; j < i; j++) {
1442 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1443 j, 0);
1444 new_fm->e[j] = NULL;
1445 }
1446 goto err;
1447 }
1448 new_fm->e[i] = old_fm->e[i];
1449 old_fm->e[i] = NULL;
1450 } else {
1451 ubi_err(ubi, "could not get any free erase block");
1452
1453 for (j = 1; j < i; j++) {
1454 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
1455 new_fm->e[j] = NULL;
1456 }
 1457
 1458 ret = -ENOSPC;
1459 goto err;
1460 }
 1461 } else {
 1462 new_fm->e[i] = tmp_e;
 1463
 1464 if (old_fm && old_fm->e[i]) {
1465 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1466 old_fm->to_be_tortured[i]);
1467 old_fm->e[i] = NULL;
1468 }
1469 }
1470 }
1471
1472 /* Old fastmap is larger than the new one */
1473 if (old_fm && new_fm->used_blocks < old_fm->used_blocks) {
1474 for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
1475 ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
1476 old_fm->to_be_tortured[i]);
 1477 old_fm->e[i] = NULL;
1478 }
1479 }
1480
1481 spin_lock(&ubi->wl_lock);
1482 tmp_e = ubi_wl_get_fm_peb(ubi, 1);
1483 spin_unlock(&ubi->wl_lock);
1484
1485 if (old_fm) {
1486 /* no fresh anchor PEB was found, reuse the old one */
1487 if (!tmp_e) {
1488 ret = erase_block(ubi, old_fm->e[0]->pnum);
1489 if (ret < 0) {
 1490 ubi_err(ubi, "could not erase old anchor PEB");
 1491
 1492 for (i = 1; i < new_fm->used_blocks; i++) {
1493 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
1494 i, 0);
1495 new_fm->e[i] = NULL;
1496 }
1497 goto err;
1498 }
 1499 new_fm->e[0] = old_fm->e[0];
 1500 new_fm->e[0]->ec = ret;
 1501 old_fm->e[0] = NULL;
1502 } else {
1503 /* we've got a new anchor PEB, return the old one */
1504 ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
1505 old_fm->to_be_tortured[0]);
 1506 new_fm->e[0] = tmp_e;
 1507 old_fm->e[0] = NULL;
1508 }
1509 } else {
1510 if (!tmp_e) {
 1511 ubi_err(ubi, "could not find any anchor PEB");
 1512
 1513 for (i = 1; i < new_fm->used_blocks; i++) {
 1514 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
1515 new_fm->e[i] = NULL;
1516 }
1517
1518 ret = -ENOSPC;
1519 goto err;
1520 }
 1521 new_fm->e[0] = tmp_e;
1522 }
1523
1524 down_write(&ubi->work_sem);
 1525 down_write(&ubi->fm_eba_sem);
 1526 ret = ubi_write_fastmap(ubi, new_fm);
 1527 up_write(&ubi->fm_eba_sem);
1528 up_write(&ubi->work_sem);
1529
1530 if (ret)
1531 goto err;
1532
1533out_unlock:
 1534 up_write(&ubi->fm_protect);
1535 kfree(old_fm);
1536 return ret;
1537
1538err:
 1539 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
 1540
1541 ret = invalidate_fastmap(ubi);
1542 if (ret < 0) {
 1543 ubi_err(ubi, "Unable to invalidate current fastmap!");
1544 ubi_ro_mode(ubi);
1545 } else {
1546 return_fm_pebs(ubi, old_fm);
1547 return_fm_pebs(ubi, new_fm);
1548 ret = 0;
 1549 }
1550
1551 kfree(new_fm);
1552 goto out_unlock;
1553}