/*
 * jffs2: Convert printks to pr_<level>
 * [deliverable/linux.git] / fs / jffs2 / wbuf.c
 */
1 /*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright © 2001-2007 Red Hat, Inc.
5 * Copyright © 2004 Thomas Gleixner <tglx@linutronix.de>
6 *
7 * Created by David Woodhouse <dwmw2@infradead.org>
8 * Modified debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
9 *
10 * For licensing information, see the file 'LICENCE' in this directory.
11 *
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/crc32.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/jiffies.h>
20 #include <linux/sched.h>
21
22 #include "nodelist.h"
23
24 /* For testing write failures */
25 #undef BREAKME
26 #undef BREAKMEHEADER
27
28 #ifdef BREAKME
29 static unsigned char *brokenbuf;
30 #endif
31
32 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
33 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
34
35 /* max. erase failures before we mark a block bad */
36 #define MAX_ERASE_FAILURES 2
37
38 struct jffs2_inodirty {
39 uint32_t ino;
40 struct jffs2_inodirty *next;
41 };
42
43 static struct jffs2_inodirty inodirty_nomem;
44
45 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
46 {
47 struct jffs2_inodirty *this = c->wbuf_inodes;
48
49 /* If a malloc failed, consider _everything_ dirty */
50 if (this == &inodirty_nomem)
51 return 1;
52
53 /* If ino == 0, _any_ non-GC writes mean 'yes' */
54 if (this && !ino)
55 return 1;
56
57 /* Look to see if the inode in question is pending in the wbuf */
58 while (this) {
59 if (this->ino == ino)
60 return 1;
61 this = this->next;
62 }
63 return 0;
64 }
65
66 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
67 {
68 struct jffs2_inodirty *this;
69
70 this = c->wbuf_inodes;
71
72 if (this != &inodirty_nomem) {
73 while (this) {
74 struct jffs2_inodirty *next = this->next;
75 kfree(this);
76 this = next;
77 }
78 }
79 c->wbuf_inodes = NULL;
80 }
81
82 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
83 {
84 struct jffs2_inodirty *new;
85
86 /* Mark the superblock dirty so that kupdated will flush... */
87 jffs2_dirty_trigger(c);
88
89 if (jffs2_wbuf_pending_for_ino(c, ino))
90 return;
91
92 new = kmalloc(sizeof(*new), GFP_KERNEL);
93 if (!new) {
94 jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n");
95 jffs2_clear_wbuf_ino_list(c);
96 c->wbuf_inodes = &inodirty_nomem;
97 return;
98 }
99 new->ino = ino;
100 new->next = c->wbuf_inodes;
101 c->wbuf_inodes = new;
102 return;
103 }
104
/* Move eraseblocks that were only waiting for a wbuf flush onto the
 * erase lists.  Called with c->erase_completion_lock held (see
 * __jffs2_flush_wbuf), after the write buffer has been committed, so
 * the blocks' pending data is safely on flash. */
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;	/* rolling counter used to spread blocks between lists */

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n",
			  jeb->offset);
		list_del(this);
		/* 127 times out of 128 queue for immediate erase; the
		   remaining case parks the block on erasable_list instead. */
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			jffs2_dbg(1, "...and adding to erase_pending_list\n");
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			jffs2_dbg(1, "...and adding to erasable_list\n");
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
134
135 #define REFILE_NOTEMPTY 0
136 #define REFILE_ANYWAY 1
137
/* Take an eraseblock that suffered a write failure off the active
 * lists.  A block still holding nodes goes on bad_used_list; an empty
 * one goes straight to erase_pending_list (only legal when
 * allow_empty == REFILE_ANYWAY).  Any space still free in the block is
 * swallowed by a single obsolete ref and accounted as wasted.
 * Callers hold c->erase_completion_lock. */
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset);

	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n",
			  jeb->offset);
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}

	if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
		uint32_t oldfree = jeb->free_size;

		/* Cover the remaining free space with one obsolete ref... */
		jffs2_link_node_ref(c, jeb,
				    (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE,
				    oldfree, NULL);
		/* convert to wasted */
		c->wasted_size += oldfree;
		jeb->wasted_size += oldfree;
		c->dirty_size -= oldfree;
		jeb->dirty_size -= oldfree;
	}

	jffs2_dbg_dump_block_lists_nolock(c);
	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
}
178
/* Given a raw node ref being relocated, find the in-core structure
 * (f->metadata, a full_dnode in the fragtree, or a full_dirent) in
 * inode @f that still points at @raw, and return the address of its
 * ->raw pointer so the caller can redirect it to the new location.
 * @node is the node's on-flash data (read back from flash/wbuf), used
 * only to identify the node type.  Returns NULL for node types with no
 * in-core ->raw pointer; BUGs if a node that must be present is not
 * found. */
static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c,
							    struct jffs2_inode_info *f,
							    struct jffs2_raw_node_ref *raw,
							    union jffs2_node_union *node)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dirent *fd;

	dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n",
		    node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype));

	/* Magic must be the JFFS2 magic (0x1985) or zeroed-out */
	BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 &&
	       je16_to_cpu(node->u.magic) != 0);

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		if (f->metadata && f->metadata->raw == raw) {
			dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata);
			return &f->metadata->raw;
		}
		frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset));
		BUG_ON(!frag);
		/* Find a frag which refers to the full_dnode we want to modify */
		while (!frag->node || frag->node->raw != raw) {
			frag = frag_next(frag);
			BUG_ON(!frag);
		}
		dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node);
		return &frag->node->raw;

	case JFFS2_NODETYPE_DIRENT:
		for (fd = f->dents; fd; fd = fd->next) {
			if (fd->raw == raw) {
				dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd);
				return &fd->raw;
			}
		}
		/* A live dirent node must be on the dents list */
		BUG();

	default:
		dbg_noderef("Don't care about replacing raw for nodetype %x\n",
			    je16_to_cpu(node->u.nodetype));
		break;
	}
	return NULL;
}
225
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
/* Read back the wbuf page just written at @ofs and compare with @buf.
 * Returns 0 on a clean match, the read error code if the read-back
 * itself failed, or -EIO (after hex-dumping both copies) on mismatch
 * or short read.  Compiled out unless CONFIG_JFFS2_FS_WBUF_VERIFY. */
static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf,
			      uint32_t ofs)
{
	int ret;
	size_t retlen;
	char *eccstr;

	ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify);
	/* -EUCLEAN/-EBADMSG only report ECC state; data was still read */
	if (ret && ret != -EUCLEAN && ret != -EBADMSG) {
		pr_warn("%s(): Read back of page at %08x failed: %d\n",
			__func__, c->wbuf_ofs, ret);
		return ret;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n",
			__func__, ofs, retlen, c->wbuf_pagesize);
		return -EIO;
	}
	if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize))
		return 0;

	/* Mismatch: classify the ECC status for the report below */
	if (ret == -EUCLEAN)
		eccstr = "corrected";
	else if (ret == -EBADMSG)
		eccstr = "correction failed";
	else
		eccstr = "OK or unused";

	pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n",
		eccstr, c->wbuf_ofs);
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf, c->wbuf_pagesize, 0);

	pr_warn("Read back:\n");
	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
		       c->wbuf_verify, c->wbuf_pagesize, 0);

	return -EIO;
}
#else
#define jffs2_verify_write(c,b,o) (0)
#endif
268
269 /* Recover from failure to write wbuf. Recover the nodes up to the
270 * wbuf, not the one which we were starting to try to write. */
271
272 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
273 {
274 struct jffs2_eraseblock *jeb, *new_jeb;
275 struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL;
276 size_t retlen;
277 int ret;
278 int nr_refile = 0;
279 unsigned char *buf;
280 uint32_t start, end, ofs, len;
281
282 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
283
284 spin_lock(&c->erase_completion_lock);
285 if (c->wbuf_ofs % c->mtd->erasesize)
286 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
287 else
288 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
289 spin_unlock(&c->erase_completion_lock);
290
291 BUG_ON(!ref_obsolete(jeb->last_node));
292
293 /* Find the first node to be recovered, by skipping over every
294 node which ends before the wbuf starts, or which is obsolete. */
295 for (next = raw = jeb->first_node; next; raw = next) {
296 next = ref_next(raw);
297
298 if (ref_obsolete(raw) ||
299 (next && ref_offset(next) <= c->wbuf_ofs)) {
300 dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
301 ref_offset(raw), ref_flags(raw),
302 (ref_offset(raw) + ref_totlen(c, jeb, raw)),
303 c->wbuf_ofs);
304 continue;
305 }
306 dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n",
307 ref_offset(raw), ref_flags(raw),
308 (ref_offset(raw) + ref_totlen(c, jeb, raw)));
309
310 first_raw = raw;
311 break;
312 }
313
314 if (!first_raw) {
315 /* All nodes were obsolete. Nothing to recover. */
316 jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n");
317 c->wbuf_len = 0;
318 return;
319 }
320
321 start = ref_offset(first_raw);
322 end = ref_offset(jeb->last_node);
323 nr_refile = 1;
324
325 /* Count the number of refs which need to be copied */
326 while ((raw = ref_next(raw)) != jeb->last_node)
327 nr_refile++;
328
329 dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n",
330 start, end, end - start, nr_refile);
331
332 buf = NULL;
333 if (start < c->wbuf_ofs) {
334 /* First affected node was already partially written.
335 * Attempt to reread the old data into our buffer. */
336
337 buf = kmalloc(end - start, GFP_KERNEL);
338 if (!buf) {
339 pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n");
340
341 goto read_failed;
342 }
343
344 /* Do the read... */
345 ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen,
346 buf);
347
348 /* ECC recovered ? */
349 if ((ret == -EUCLEAN || ret == -EBADMSG) &&
350 (retlen == c->wbuf_ofs - start))
351 ret = 0;
352
353 if (ret || retlen != c->wbuf_ofs - start) {
354 pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n");
355
356 kfree(buf);
357 buf = NULL;
358 read_failed:
359 first_raw = ref_next(first_raw);
360 nr_refile--;
361 while (first_raw && ref_obsolete(first_raw)) {
362 first_raw = ref_next(first_raw);
363 nr_refile--;
364 }
365
366 /* If this was the only node to be recovered, give up */
367 if (!first_raw) {
368 c->wbuf_len = 0;
369 return;
370 }
371
372 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
373 start = ref_offset(first_raw);
374 dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n",
375 start, end, end - start, nr_refile);
376
377 } else {
378 /* Read succeeded. Copy the remaining data from the wbuf */
379 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
380 }
381 }
382 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
383 Either 'buf' contains the data, or we find it in the wbuf */
384
385 /* ... and get an allocation of space from a shiny new block instead */
386 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE);
387 if (ret) {
388 pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n");
389 kfree(buf);
390 return;
391 }
392
393 /* The summary is not recovered, so it must be disabled for this erase block */
394 jffs2_sum_disable_collecting(c->summary);
395
396 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile);
397 if (ret) {
398 pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n");
399 kfree(buf);
400 return;
401 }
402
403 ofs = write_ofs(c);
404
405 if (end-start >= c->wbuf_pagesize) {
406 /* Need to do another write immediately, but it's possible
407 that this is just because the wbuf itself is completely
408 full, and there's nothing earlier read back from the
409 flash. Hence 'buf' isn't necessarily what we're writing
410 from. */
411 unsigned char *rewrite_buf = buf?:c->wbuf;
412 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
413
414 jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n",
415 towrite, ofs);
416
417 #ifdef BREAKMEHEADER
418 static int breakme;
419 if (breakme++ == 20) {
420 pr_notice("Faking write error at 0x%08x\n", ofs);
421 breakme = 0;
422 mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf);
423 ret = -EIO;
424 } else
425 #endif
426 ret = mtd_write(c->mtd, ofs, towrite, &retlen,
427 rewrite_buf);
428
429 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) {
430 /* Argh. We tried. Really we did. */
431 pr_crit("Recovery of wbuf failed due to a second write error\n");
432 kfree(buf);
433
434 if (retlen)
435 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL);
436
437 return;
438 }
439 pr_notice("Recovery of wbuf succeeded to %08x\n", ofs);
440
441 c->wbuf_len = (end - start) - towrite;
442 c->wbuf_ofs = ofs + towrite;
443 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
444 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
445 } else {
446 /* OK, now we're left with the dregs in whichever buffer we're using */
447 if (buf) {
448 memcpy(c->wbuf, buf, end-start);
449 } else {
450 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
451 }
452 c->wbuf_ofs = ofs;
453 c->wbuf_len = end - start;
454 }
455
456 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
457 new_jeb = &c->blocks[ofs / c->sector_size];
458
459 spin_lock(&c->erase_completion_lock);
460 for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) {
461 uint32_t rawlen = ref_totlen(c, jeb, raw);
462 struct jffs2_inode_cache *ic;
463 struct jffs2_raw_node_ref *new_ref;
464 struct jffs2_raw_node_ref **adjust_ref = NULL;
465 struct jffs2_inode_info *f = NULL;
466
467 jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n",
468 rawlen, ref_offset(raw), ref_flags(raw), ofs);
469
470 ic = jffs2_raw_ref_to_ic(raw);
471
472 /* Ick. This XATTR mess should be fixed shortly... */
473 if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) {
474 struct jffs2_xattr_datum *xd = (void *)ic;
475 BUG_ON(xd->node != raw);
476 adjust_ref = &xd->node;
477 raw->next_in_ino = NULL;
478 ic = NULL;
479 } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) {
480 struct jffs2_xattr_datum *xr = (void *)ic;
481 BUG_ON(xr->node != raw);
482 adjust_ref = &xr->node;
483 raw->next_in_ino = NULL;
484 ic = NULL;
485 } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) {
486 struct jffs2_raw_node_ref **p = &ic->nodes;
487
488 /* Remove the old node from the per-inode list */
489 while (*p && *p != (void *)ic) {
490 if (*p == raw) {
491 (*p) = (raw->next_in_ino);
492 raw->next_in_ino = NULL;
493 break;
494 }
495 p = &((*p)->next_in_ino);
496 }
497
498 if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) {
499 /* If it's an in-core inode, then we have to adjust any
500 full_dirent or full_dnode structure to point to the
501 new version instead of the old */
502 f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink);
503 if (IS_ERR(f)) {
504 /* Should never happen; it _must_ be present */
505 JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n",
506 ic->ino, PTR_ERR(f));
507 BUG();
508 }
509 /* We don't lock f->sem. There's a number of ways we could
510 end up in here with it already being locked, and nobody's
511 going to modify it on us anyway because we hold the
512 alloc_sem. We're only changing one ->raw pointer too,
513 which we can get away with without upsetting readers. */
514 adjust_ref = jffs2_incore_replace_raw(c, f, raw,
515 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start));
516 } else if (unlikely(ic->state != INO_STATE_PRESENT &&
517 ic->state != INO_STATE_CHECKEDABSENT &&
518 ic->state != INO_STATE_GC)) {
519 JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state);
520 BUG();
521 }
522 }
523
524 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic);
525
526 if (adjust_ref) {
527 BUG_ON(*adjust_ref != raw);
528 *adjust_ref = new_ref;
529 }
530 if (f)
531 jffs2_gc_release_inode(c, f);
532
533 if (!ref_obsolete(raw)) {
534 jeb->dirty_size += rawlen;
535 jeb->used_size -= rawlen;
536 c->dirty_size += rawlen;
537 c->used_size -= rawlen;
538 raw->flash_offset = ref_offset(raw) | REF_OBSOLETE;
539 BUG_ON(raw->next_in_ino);
540 }
541 ofs += rawlen;
542 }
543
544 kfree(buf);
545
546 /* Fix up the original jeb now it's on the bad_list */
547 if (first_raw == jeb->first_node) {
548 jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n",
549 jeb->offset);
550 list_move(&jeb->list, &c->erase_pending_list);
551 c->nr_erasing_blocks++;
552 jffs2_garbage_collect_trigger(c);
553 }
554
555 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
556 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
557
558 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb);
559 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb);
560
561 spin_unlock(&c->erase_completion_lock);
562
563 jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n",
564 c->wbuf_ofs, c->wbuf_len);
565
566 }
567
568 /* Meaning of pad argument:
569 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
570 1: Pad, do not adjust nextblock free_size
571 2: Pad, adjust nextblock free_size
572 */
573 #define NOPAD 0
574 #define PAD_NOACCOUNT 1
575 #define PAD_ACCOUNTING 2
576
/* Write the current wbuf page out to flash.  @pad selects NOPAD /
 * PAD_NOACCOUNT / PAD_ACCOUNTING (see comment above).  Caller must
 * hold c->alloc_sem (BUGs otherwise).  On write failure the recovery
 * path (jffs2_wbuf_recover) is invoked and the error returned. */
static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
{
	struct jffs2_eraseblock *wbuf_jeb;
	int ret;
	size_t retlen;

	/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
	   del_timer() the timer we never initialised. */
	if (!jffs2_is_writebuffered(c))
		return 0;

	if (!mutex_is_locked(&c->alloc_sem)) {
		pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n");
		BUG();
	}

	if (!c->wbuf_len)	/* already checked c->wbuf above */
		return 0;

	/* Reserve refs up front: one extra for the padding node we may add */
	wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
	if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1))
		return -ENOMEM;

	/* claim remaining space on the page
	   this happens, if we have a change to a new block,
	   or if fsync forces us to flush the writebuffer.
	   if we have a switch to next page, we will not have
	   enough remaining space for this.
	*/
	if (pad ) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);

		/* If there's room, write a proper PADDING node header so the
		   scanner can skip the tail of the page cleanly on mount. */
		if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
			struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
			padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
			padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
			padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
			padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
		}
	}
	/* else jffs2_flash_writev has actually filled in the rest of the
	   buffer for us, and will deal with the node refs etc. later. */

#ifdef BREAKME
	static int breakme;
	if (breakme++ == 20) {
		pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs);
		breakme = 0;
		mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen,
			  brokenbuf);
		ret = -EIO;
	} else
#endif

		ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
				&retlen, c->wbuf);

	if (ret) {
		pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret);
		goto wfail;
	} else if (retlen != c->wbuf_pagesize) {
		pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
			retlen, c->wbuf_pagesize);
		ret = -EIO;
		goto wfail;
	} else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) {
	wfail:
		/* Any failure path funnels into recovery */
		jffs2_wbuf_recover(c);

		return ret;
	}

	/* Adjust free size of the block if we padded. */
	if (pad) {
		uint32_t waste = c->wbuf_pagesize - c->wbuf_len;

		jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
			  (wbuf_jeb == c->nextblock) ? "next" : "",
			  wbuf_jeb->offset);

		/* wbuf_pagesize - wbuf_len is the amount of space that's to be
		   padded. If there is less free space in the block than that,
		   something screwed up */
		if (wbuf_jeb->free_size < waste) {
			pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
				c->wbuf_ofs, c->wbuf_len, waste);
			pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
				wbuf_jeb->offset, wbuf_jeb->free_size);
			BUG();
		}

		spin_lock(&c->erase_completion_lock);

		jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL);
		/* FIXME: that made it count as dirty. Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the wbuf (0xff = erased flash) for the next page */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}
694
/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the
   given inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Snapshot the wbuf position so we can tell when GC has moved it */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {
		/* Loop until a GC pass has moved the wbuf on (wbuf_ofs
		   changed) or GC fails and we fall back to padding.
		   alloc_sem is dropped around the GC pass because
		   jffs2_garbage_collect_pass takes it itself. */

		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}
759
760 /* Pad write-buffer to end and write it, wasting space. */
761 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
762 {
763 int ret;
764
765 if (!c->wbuf)
766 return 0;
767
768 down_write(&c->wbuf_sem);
769 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
770 /* retry - maybe wbuf recover left some data in wbuf. */
771 if (ret)
772 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
773 up_write(&c->wbuf_sem);
774
775 return ret;
776 }
777
778 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
779 size_t len)
780 {
781 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
782 return 0;
783
784 if (len > (c->wbuf_pagesize - c->wbuf_len))
785 len = c->wbuf_pagesize - c->wbuf_len;
786 memcpy(c->wbuf + c->wbuf_len, buf, len);
787 c->wbuf_len += (uint32_t) len;
788 return len;
789 }
790
/* Write a vector of buffers at @to, staging sub-page tails through the
 * write buffer.  Data must be contiguous with the current wbuf contents
 * or start a new eraseblock.  @ino (non-zero for real, non-GC writes)
 * is recorded if a tail remains buffered.  *retlen reports bytes
 * accepted (buffered counts as written).  Takes c->wbuf_sem. */
int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf,0xff,c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address. It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die. New block starts at xxx000c (0-b = block
	 * header)
	 */
	if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
		/* It's a write to a new block */
		if (c->wbuf_len) {
			jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n",
				  __func__, (unsigned long)to, c->wbuf_ofs);
			ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
			if (ret)
				goto outerr;
		}
		/* set pointer to new block */
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
	}

	if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
		/* We're not writing immediately after the writebuffer. Bad. */
		pr_crit("%s(): Non-contiguous write to %08lx\n",
			__func__, (unsigned long)to);
		if (c->wbuf_len)
			pr_crit("wbuf was previously %08x-%08x\n",
				c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len);
		BUG();
	}

	/* adjust alignment offset */
	if (c->wbuf_len != PAGE_MOD(to)) {
		c->wbuf_len = PAGE_MOD(to);
		/* take care of alignment to next page */
		if (!c->wbuf_len) {
			c->wbuf_len = c->wbuf_pagesize;
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
	}

	for (invec = 0; invec < count; invec++) {
		int vlen = invecs[invec].iov_len;
		uint8_t *v = invecs[invec].iov_base;

		/* Top up the wbuf first; flush if that completed a page */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);

		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}
		vlen -= wbuf_retlen;
		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
		v += wbuf_retlen;

		/* Whole pages go straight to flash, bypassing the wbuf */
		if (vlen >= c->wbuf_pagesize) {
			ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen),
					&wbuf_retlen, v);
			if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen))
				goto outfile;

			vlen -= wbuf_retlen;
			outvec_to += wbuf_retlen;
			c->wbuf_ofs = outvec_to;
			donelen += wbuf_retlen;
			v += wbuf_retlen;
		}

		/* Buffer the sub-page remainder of this vector */
		wbuf_retlen = jffs2_fill_wbuf(c, v, vlen);
		if (c->wbuf_len == c->wbuf_pagesize) {
			ret = __jffs2_flush_wbuf(c, NOPAD);
			if (ret)
				goto outerr;
		}

		outvec_to += wbuf_retlen;
		donelen += wbuf_retlen;
	}

	/*
	 * If there's a remainder in the wbuf and it's a non-GC write,
	 * remember that the wbuf affects this ino
	 */
	*retlen = donelen;

	if (jffs2_sum_active()) {
		int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to);
		/* NOTE(review): this path returns with c->wbuf_sem still
		   held — looks like a lock leak on summary failure; confirm
		   against callers before changing. */
		if (res)
			return res;
	}

	if (c->wbuf_len && ino)
		jffs2_wbuf_dirties_inode(c, ino);

	ret = 0;
	up_write(&c->wbuf_sem);
	return ret;

outfile:
	/*
	 * At this point we have no problem, c->wbuf is empty. However
	 * refile nextblock to avoid writing again to same address.
	 */

	spin_lock(&c->erase_completion_lock);

	jeb = &c->blocks[outvec_to / c->sector_size];
	jffs2_block_refile(c, jeb, REFILE_ANYWAY);

	spin_unlock(&c->erase_completion_lock);

outerr:
	*retlen = 0;
	up_write(&c->wbuf_sem);
	return ret;
}
933
934 /*
935 * This is the entry for flash write.
936 * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev
937 */
938 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len,
939 size_t *retlen, const u_char *buf)
940 {
941 struct kvec vecs[1];
942
943 if (!jffs2_is_writebuffered(c))
944 return jffs2_flash_direct_write(c, ofs, len, retlen, buf);
945
946 vecs[0].iov_base = (unsigned char *) buf;
947 vecs[0].iov_len = len;
948 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
949 }
950
/* Read @len bytes at @ofs.  ECC failures (-EBADMSG/-EUCLEAN) with a
 * complete read are downgraded to success — node CRC checks will catch
 * real corruption later.  Any overlap with the not-yet-flushed wbuf is
 * patched in from the buffer.  Takes c->wbuf_sem for read. */
int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
{
	loff_t	orbf = 0, owbf = 0, lwbf = 0;
	int	ret;

	if (!jffs2_is_writebuffered(c))
		return mtd_read(c->mtd, ofs, len, retlen, buf);

	/* Read flash */
	down_read(&c->wbuf_sem);
	ret = mtd_read(c->mtd, ofs, len, retlen, buf);

	if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) {
		if (ret == -EBADMSG)
			pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
				len, ofs);
		/*
		 * We have the raw data without ECC correction in the buffer,
		 * maybe we are lucky and all data or parts are correct. We
		 * check the node. If data are corrupted node check will sort
		 * it out. We keep this block, it will fail on write or erase
		 * and the we mark it bad. Or should we do that now? But we
		 * should give him a chance. Maybe we had a system crash or
		 * power loss before the ecc write or a erase was completed.
		 * So we return success. :)
		 */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* Work out where the wbuf overlaps the requested range */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	/* Overlay the buffered (newer) data over what flash returned */
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}
1013
/* Number of pages whose OOB areas are scanned when checking a block */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* The cleanmarker node as written to / compared against OOB:
 * magic + nodetype + a fixed totlen of 8 bytes. */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};
1025
/*
 * Check, if the out of band area is empty. This function knows about the clean
 * marker and if it is present in OOB, treats the OOB as empty anyway.
 *
 * Reads NR_OOB_SCAN_PAGES worth of auto-placed OOB from the start of
 * @jeb.  When @mode is non-zero the first cmlen bytes (the cleanmarker)
 * are ignored.  Returns 0 if empty, 1 if any other byte != 0xFF, or a
 * negative error code if the OOB read failed.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops;

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		/* A short read with no error code still counts as failure */
		if (!ret)
			ret = -EIO;
		return ret;
	}

	for(i = 0; i < ops.ooblen; i++) {
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for "
				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}
1066
1067 /*
1068 * Check for a valid cleanmarker.
1069 * Returns: 0 if a valid cleanmarker was found
1070 * 1 if no cleanmarker was found
1071 * negative error code if an error occurred
1072 */
1073 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
1074 struct jffs2_eraseblock *jeb)
1075 {
1076 struct mtd_oob_ops ops;
1077 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1078
1079 ops.mode = MTD_OPS_AUTO_OOB;
1080 ops.ooblen = cmlen;
1081 ops.oobbuf = c->oobbuf;
1082 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1083 ops.datbuf = NULL;
1084
1085 ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
1086 if (ret || ops.oobretlen != ops.ooblen) {
1087 pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1088 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1089 if (!ret)
1090 ret = -EIO;
1091 return ret;
1092 }
1093
1094 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
1095 }
1096
1097 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
1098 struct jffs2_eraseblock *jeb)
1099 {
1100 int ret;
1101 struct mtd_oob_ops ops;
1102 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
1103
1104 ops.mode = MTD_OPS_AUTO_OOB;
1105 ops.ooblen = cmlen;
1106 ops.oobbuf = (uint8_t *)&oob_cleanmarker;
1107 ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
1108 ops.datbuf = NULL;
1109
1110 ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
1111 if (ret || ops.oobretlen != ops.ooblen) {
1112 pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
1113 jeb->offset, ops.ooblen, ops.oobretlen, ret);
1114 if (!ret)
1115 ret = -EIO;
1116 return ret;
1117 }
1118
1119 return 0;
1120 }
1121
/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES we mark it finally bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 *
 * Returns 0 if the block is being tolerated for now, 1 once it has
 * actually been marked bad, or a negative error from the MTD layer.
 */

int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* Tolerate up to MAX_ERASE_FAILURES failed erases before marking
	 * the block bad; until then only the in-RAM per-block counter is
	 * bumped. (NOTE(review): an older comment here claimed the counter
	 * was written to the 2nd page OOB area — the code does not.) */
	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("JFFS2: marking eraseblock at %08x as bad\n", bad_offset);
	ret = mtd_block_markbad(c->mtd, bad_offset);

	if (ret) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, ret);
		return ret;
	}
	/* 1 tells the caller the block is now permanently marked bad */
	return 1;
}
1148
1149 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1150 {
1151 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
1152
1153 if (!c->mtd->oobsize)
1154 return 0;
1155
1156 /* Cleanmarker is out-of-band, so inline size zero */
1157 c->cleanmarker_size = 0;
1158
1159 if (!oinfo || oinfo->oobavail == 0) {
1160 pr_err("inconsistent device description\n");
1161 return -EINVAL;
1162 }
1163
1164 jffs2_dbg(1, "JFFS2 using OOB on NAND\n");
1165
1166 c->oobavail = oinfo->oobavail;
1167
1168 /* Initialise write buffer */
1169 init_rwsem(&c->wbuf_sem);
1170 c->wbuf_pagesize = c->mtd->writesize;
1171 c->wbuf_ofs = 0xFFFFFFFF;
1172
1173 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1174 if (!c->wbuf)
1175 return -ENOMEM;
1176
1177 c->oobbuf = kmalloc(NR_OOB_SCAN_PAGES * c->oobavail, GFP_KERNEL);
1178 if (!c->oobbuf) {
1179 kfree(c->wbuf);
1180 return -ENOMEM;
1181 }
1182
1183 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1184 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1185 if (!c->wbuf_verify) {
1186 kfree(c->oobbuf);
1187 kfree(c->wbuf);
1188 return -ENOMEM;
1189 }
1190 #endif
1191 return 0;
1192 }
1193
1194 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1195 {
1196 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1197 kfree(c->wbuf_verify);
1198 #endif
1199 kfree(c->wbuf);
1200 kfree(c->oobbuf);
1201 }
1202
1203 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1204 c->cleanmarker_size = 0; /* No cleanmarkers needed */
1205
1206 /* Initialize write buffer */
1207 init_rwsem(&c->wbuf_sem);
1208
1209
1210 c->wbuf_pagesize = c->mtd->erasesize;
1211
1212 /* Find a suitable c->sector_size
1213 * - Not too much sectors
1214 * - Sectors have to be at least 4 K + some bytes
1215 * - All known dataflashes have erase sizes of 528 or 1056
1216 * - we take at least 8 eraseblocks and want to have at least 8K size
1217 * - The concatenation should be a power of 2
1218 */
1219
1220 c->sector_size = 8 * c->mtd->erasesize;
1221
1222 while (c->sector_size < 8192) {
1223 c->sector_size *= 2;
1224 }
1225
1226 /* It may be necessary to adjust the flash size */
1227 c->flash_size = c->mtd->size;
1228
1229 if ((c->flash_size % c->sector_size) != 0) {
1230 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
1231 pr_warn("JFFS2 flash size adjusted to %dKiB\n", c->flash_size);
1232 };
1233
1234 c->wbuf_ofs = 0xFFFFFFFF;
1235 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1236 if (!c->wbuf)
1237 return -ENOMEM;
1238
1239 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1240 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1241 if (!c->wbuf_verify) {
1242 kfree(c->oobbuf);
1243 kfree(c->wbuf);
1244 return -ENOMEM;
1245 }
1246 #endif
1247
1248 pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1249 c->wbuf_pagesize, c->sector_size);
1250
1251 return 0;
1252 }
1253
1254 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1255 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1256 kfree(c->wbuf_verify);
1257 #endif
1258 kfree(c->wbuf);
1259 }
1260
1261 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1262 /* Cleanmarker currently occupies whole programming regions,
1263 * either one or 2 for 8Byte STMicro flashes. */
1264 c->cleanmarker_size = max(16u, c->mtd->writesize);
1265
1266 /* Initialize write buffer */
1267 init_rwsem(&c->wbuf_sem);
1268 c->wbuf_pagesize = c->mtd->writesize;
1269 c->wbuf_ofs = 0xFFFFFFFF;
1270
1271 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1272 if (!c->wbuf)
1273 return -ENOMEM;
1274
1275 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1276 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1277 if (!c->wbuf_verify) {
1278 kfree(c->wbuf);
1279 return -ENOMEM;
1280 }
1281 #endif
1282 return 0;
1283 }
1284
1285 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
1286 #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
1287 kfree(c->wbuf_verify);
1288 #endif
1289 kfree(c->wbuf);
1290 }
1291
1292 int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1293 c->cleanmarker_size = 0;
1294
1295 if (c->mtd->writesize == 1)
1296 /* We do not need write-buffer */
1297 return 0;
1298
1299 init_rwsem(&c->wbuf_sem);
1300
1301 c->wbuf_pagesize = c->mtd->writesize;
1302 c->wbuf_ofs = 0xFFFFFFFF;
1303 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1304 if (!c->wbuf)
1305 return -ENOMEM;
1306
1307 pr_info("JFFS2 write-buffering enabled buffer (%d) erasesize (%d)\n",
1308 c->wbuf_pagesize, c->sector_size);
1309
1310 return 0;
1311 }
1312
1313 void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
1314 kfree(c->wbuf);
1315 }
This page took 0.089001 seconds and 5 git commands to generate.