[JFFS2] Introduce ref_next() macro for finding next physical node
fs/jffs2/nodemgmt.c
1 /*
2 * JFFS2 -- Journalling Flash File System, Version 2.
3 *
4 * Copyright (C) 2001-2003 Red Hat, Inc.
5 *
6 * Created by David Woodhouse <dwmw2@infradead.org>
7 *
8 * For licensing information, see the file 'LICENCE' in this directory.
9 *
10 * $Id: nodemgmt.c,v 1.127 2005/09/20 15:49:12 dedekind Exp $
11 *
12 */
13
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/mtd/mtd.h>
17 #include <linux/compiler.h>
18 #include <linux/sched.h> /* For cond_resched() */
19 #include "nodelist.h"
20 #include "debug.h"
21
22 /**
23 * jffs2_reserve_space - request physical space to write nodes to flash
24 * @c: superblock info
25 * @minsize: Minimum acceptable size of allocation
26 * @len: Returned value of allocation length
27 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
28 * @sumsize: Space to reserve for the eraseblock summary, or JFFS2_SUMMARY_NOSUM_SIZE
29 *
30 * Requests a block of physical space on the flash. Returns zero for success
31 * and puts the allocated length into 'len', or returns -ENOSPC or another error if appropriate.
32 *
33 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34 * allocation semaphore, to prevent more than one allocation from being
35 * active at any time. The semaphore is later released by jffs2_complete_reservation().
36 *
37 * jffs2_reserve_space() may trigger garbage collection in order to make room
38 * for the requested allocation.
39 */
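/*
 * Typical caller pattern -- an illustrative sketch only, not taken verbatim
 * from this tree (see the real callers in fs/jffs2/write.c); the local
 * variable names and sizes here are hypothetical:
 *
 *	uint32_t alloclen;
 *	ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node into the reserved space on flash ...
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL, totlen, f->inocache);
 *	jffs2_complete_reservation(c);
 */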
40
41 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
42 uint32_t *len, uint32_t sumsize);
43
44 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
45 uint32_t *len, int prio, uint32_t sumsize)
46 {
47 int ret = -EAGAIN;
48 int blocksneeded = c->resv_blocks_write;
49 /* align it */
50 minsize = PAD(minsize);
51
52 D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
53 down(&c->alloc_sem);
54
55 D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
56
57 spin_lock(&c->erase_completion_lock);
58
59 /* this needs a little more thought (true <tglx> :)) */
60 while(ret == -EAGAIN) {
61 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
62 int ret;
63 uint32_t dirty, avail;
64
65 /* Calculate the real dirty size.
66 * dirty_size includes blocks on the erase_pending_list;
67 * those blocks are also counted in c->nr_erasing_blocks.
68 * Once a block is actually erased it is no longer counted as dirty_space,
69 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
70 * and subtract c->nr_erasing_blocks * c->sector_size again.
71 * Blocks on the erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
72 * This helps us to force GC and eventually pick a clean block to spread the load.
73 * We add unchecked_size here, as we will hopefully find some space to use there.
74 * This will affect the sum only once, as GC finishes checking
75 * those nodes first.
76 */
77 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
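/* For example, with hypothetical numbers: dirty_size = 0x60000, erasing_size = 0x20000,
 * two erasing blocks with sector_size = 0x10000 and unchecked_size = 0x4000 give
 * dirty = 0x60000 + 0x20000 - 0x20000 + 0x4000 = 0x64000, which is then compared
 * against c->nospc_dirty_size below.
 */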
78 if (dirty < c->nospc_dirty_size) {
79 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
80 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
81 break;
82 }
83 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
84 dirty, c->unchecked_size, c->nospc_dirty_size));
85
86 spin_unlock(&c->erase_completion_lock);
87 up(&c->alloc_sem);
88 return -ENOSPC;
89 }
90
91 /* Calculate the possibly available space. "Possibly available" means that we
92 * don't know whether the unchecked size contains obsoleted nodes, which could give us
93 * some more usable space. This will affect the sum only once, as GC finishes checking
94 * those nodes first.
95 * Return -ENOSPC if the maximum possibly available space is less than or equal to
96 * blocksneeded * sector_size.
97 * This prevents endless GC looping on a filesystem which is nearly full, even if
98 * the check above passes.
99 */
100 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
101 if ( (avail / c->sector_size) <= blocksneeded) {
102 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
103 D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
104 break;
105 }
106
107 D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
108 avail, blocksneeded * c->sector_size));
109 spin_unlock(&c->erase_completion_lock);
110 up(&c->alloc_sem);
111 return -ENOSPC;
112 }
113
114 up(&c->alloc_sem);
115
116 D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
117 c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
118 c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
119 spin_unlock(&c->erase_completion_lock);
120
121 ret = jffs2_garbage_collect_pass(c);
122 if (ret)
123 return ret;
124
125 cond_resched();
126
127 if (signal_pending(current))
128 return -EINTR;
129
130 down(&c->alloc_sem);
131 spin_lock(&c->erase_completion_lock);
132 }
133
134 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
135 if (ret) {
136 D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
137 }
138 }
139 spin_unlock(&c->erase_completion_lock);
140 if (!ret)
141 ret = jffs2_prealloc_raw_node_refs(c, 1);
142 if (ret)
143 up(&c->alloc_sem);
144 return ret;
145 }
146
147 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
148 uint32_t *len, uint32_t sumsize)
149 {
150 int ret = -EAGAIN;
151 minsize = PAD(minsize);
152
153 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
154
155 spin_lock(&c->erase_completion_lock);
156 while(ret == -EAGAIN) {
157 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
158 if (ret) {
159 D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
160 }
161 }
162 spin_unlock(&c->erase_completion_lock);
163 if (!ret)
164 ret = jffs2_prealloc_raw_node_refs(c, 1);
165
166 return ret;
167 }
168
169
170 /* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
171
172 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
173 {
174
175 /* Check if we have a dirty block now, or if it was dirty already */
176 if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
177 c->dirty_size += jeb->wasted_size;
178 c->wasted_size -= jeb->wasted_size;
179 jeb->dirty_size += jeb->wasted_size;
180 jeb->wasted_size = 0;
181 if (VERYDIRTY(c, jeb->dirty_size)) {
182 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
183 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
184 list_add_tail(&jeb->list, &c->very_dirty_list);
185 } else {
186 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
187 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
188 list_add_tail(&jeb->list, &c->dirty_list);
189 }
190 } else {
191 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
192 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
193 list_add_tail(&jeb->list, &c->clean_list);
194 }
195 c->nextblock = NULL;
196
197 }
198
199 /* Select a new jeb for nextblock */
200
201 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
202 {
203 struct list_head *next;
204
205 /* Take the next block off the 'free' list */
206
207 if (list_empty(&c->free_list)) {
208
209 if (!c->nr_erasing_blocks &&
210 !list_empty(&c->erasable_list)) {
211 struct jffs2_eraseblock *ejeb;
212
213 ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
214 list_del(&ejeb->list);
215 list_add_tail(&ejeb->list, &c->erase_pending_list);
216 c->nr_erasing_blocks++;
217 jffs2_erase_pending_trigger(c);
218 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
219 ejeb->offset));
220 }
221
222 if (!c->nr_erasing_blocks &&
223 !list_empty(&c->erasable_pending_wbuf_list)) {
224 D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
225 /* c->nextblock is NULL, no update to c->nextblock allowed */
226 spin_unlock(&c->erase_completion_lock);
227 jffs2_flush_wbuf_pad(c);
228 spin_lock(&c->erase_completion_lock);
229 /* Have another go. It'll be on the erasable_list now */
230 return -EAGAIN;
231 }
232
233 if (!c->nr_erasing_blocks) {
234 /* Ouch. We're in GC, or we wouldn't have got here.
235 And there's no space left. At all. */
236 printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
237 c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
238 list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
239 return -ENOSPC;
240 }
241
242 spin_unlock(&c->erase_completion_lock);
243 /* Don't wait for it; just erase one right now */
244 jffs2_erase_pending_blocks(c, 1);
245 spin_lock(&c->erase_completion_lock);
246
247 /* An erase may have failed, decreasing the
248 amount of free space available. So we must
249 restart from the beginning */
250 return -EAGAIN;
251 }
252
253 next = c->free_list.next;
254 list_del(next);
255 c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
256 c->nr_free_blocks--;
257
258 jffs2_sum_reset_collected(c->summary); /* reset collected summary */
259
260 D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
261
262 return 0;
263 }
264
265 /* Called with alloc sem _and_ erase_completion_lock */
266 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
267 uint32_t *len, uint32_t sumsize)
268 {
269 struct jffs2_eraseblock *jeb = c->nextblock;
270 uint32_t reserved_size; /* for summary information at the end of the jeb */
271 int ret;
272
273 restart:
274 reserved_size = 0;
275
276 if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
277 /* NOSUM_SIZE means not to generate summary */
278
279 if (jeb) {
280 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
281 dbg_summary("minsize=%d , jeb->free=%d ,"
282 "summary->size=%d , sumsize=%d\n",
283 minsize, jeb->free_size,
284 c->summary->sum_size, sumsize);
285 }
286
287 /* Is there enough space to write out the current node, or do we have to
288 write out the summary information now, close this jeb and select a new nextblock? */
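/* For example, with hypothetical numbers: if jeb->free_size is 0x100 while the padded
 * node needs 0xE0 and the collected summary (plus frame) needs 0x40, then
 * 0xE0 + 0x40 > 0x100, so the summary is written out and a new nextblock is chosen.
 */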
289 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
290 JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
291
292 /* Has summary been disabled for this jeb? */
293 if (jffs2_sum_is_disabled(c->summary)) {
294 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
295 goto restart;
296 }
297
298 /* Writing out the collected summary information */
299 dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
300 ret = jffs2_sum_write_sumnode(c);
301
302 if (ret)
303 return ret;
304
305 if (jffs2_sum_is_disabled(c->summary)) {
306 /* jffs2_sum_write_sumnode() couldn't write out the summary information;
307 disable summary for this jeb and free the collected information
308 */
309 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
310 goto restart;
311 }
312
313 jffs2_close_nextblock(c, jeb);
314 jeb = NULL;
315 /* always keep a valid value in reserved_size */
316 reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
317 }
318 } else {
319 if (jeb && minsize > jeb->free_size) {
320 /* Skip the end of this block and file it as having some dirty space */
321 /* If there's a pending write to it, flush now */
322
323 if (jffs2_wbuf_dirty(c)) {
324 spin_unlock(&c->erase_completion_lock);
325 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
326 jffs2_flush_wbuf_pad(c);
327 spin_lock(&c->erase_completion_lock);
328 jeb = c->nextblock;
329 goto restart;
330 }
331
332 c->wasted_size += jeb->free_size;
333 c->free_size -= jeb->free_size;
334 jeb->wasted_size += jeb->free_size;
335 jeb->free_size = 0;
336
337 jffs2_close_nextblock(c, jeb);
338 jeb = NULL;
339 }
340 }
341
342 if (!jeb) {
343
344 ret = jffs2_find_nextblock(c);
345 if (ret)
346 return ret;
347
348 jeb = c->nextblock;
349
350 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
351 printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
352 goto restart;
353 }
354 }
355 /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
356 enough space */
357 *len = jeb->free_size - reserved_size;
358
359 if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
360 !jeb->first_node->next_in_ino) {
361 /* The only node in it beforehand was a CLEANMARKER node (we think).
362 So mark it obsolete now that there's going to be another node
363 in the block. This will reduce used_size to zero, but we've
364 already set c->nextblock so that jffs2_mark_node_obsolete()
365 won't try to refile it to the dirty_list.
366 */
367 spin_unlock(&c->erase_completion_lock);
368 jffs2_mark_node_obsolete(c, jeb->first_node);
369 spin_lock(&c->erase_completion_lock);
370 }
371
372 D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
373 *len, jeb->offset + (c->sector_size - jeb->free_size)));
374 return 0;
375 }
376
377 /**
378 * jffs2_add_physical_node_ref - add a physical node reference to the list
379 * @c: superblock info
380 * @ofs: flash offset of the new node, with its REF_* status in the low two bits
381 * @len: length of this physical node
382 * @ic: inode cache to link the new reference into (may be NULL)
383 *
384 * Should only be used to report nodes for which space has been allocated
385 * by jffs2_reserve_space(). Returns the new reference, or an ERR_PTR on error.
386 * Must be called with the alloc_sem held.
387 */
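/*
 * Illustrative sketch only; the offset, length and REF_ state here are
 * hypothetical. A caller that has just written a 68-byte node at the current
 * write point 0x2004 of c->nextblock might report it as:
 *
 *	ref = jffs2_add_physical_node_ref(c, 0x2004 | REF_NORMAL, PAD(68), f->inocache);
 *
 * The low two bits of 'ofs' carry the REF_* state; for a non-obsolete node,
 * (ofs & ~3) must match the current write offset of c->nextblock or the call
 * returns an ERR_PTR.
 */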
388
389 struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
390 uint32_t ofs, uint32_t len,
391 struct jffs2_inode_cache *ic)
392 {
393 struct jffs2_eraseblock *jeb;
394 struct jffs2_raw_node_ref *new;
395
396 jeb = &c->blocks[ofs / c->sector_size];
397
398 D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
399 ofs & ~3, ofs & 3, len));
400 #if 1
401 /* Allow non-obsolete nodes only to be added at the end of c->nextblock,
402 if c->nextblock is set. Note that wbuf.c will file obsolete nodes
403 even after refiling c->nextblock */
404 if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
405 && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
406 printk(KERN_WARNING "argh. node added in wrong place\n");
407 return ERR_PTR(-EINVAL);
408 }
409 #endif
410 spin_lock(&c->erase_completion_lock);
411
412 new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
413
414 if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
415 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
416 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
417 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
418 if (jffs2_wbuf_dirty(c)) {
419 /* Flush the last write in the block if it's outstanding */
420 spin_unlock(&c->erase_completion_lock);
421 jffs2_flush_wbuf_pad(c);
422 spin_lock(&c->erase_completion_lock);
423 }
424
425 list_add_tail(&jeb->list, &c->clean_list);
426 c->nextblock = NULL;
427 }
428 jffs2_dbg_acct_sanity_check_nolock(c,jeb);
429 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
430
431 spin_unlock(&c->erase_completion_lock);
432
433 return new;
434 }
435
436
437 void jffs2_complete_reservation(struct jffs2_sb_info *c)
438 {
439 D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
440 jffs2_garbage_collect_trigger(c);
441 up(&c->alloc_sem);
442 }
443
444 static inline int on_list(struct list_head *obj, struct list_head *head)
445 {
446 struct list_head *this;
447
448 list_for_each(this, head) {
449 if (this == obj) {
450 D1(printk("%p is on list at %p\n", obj, head));
451 return 1;
452
453 }
454 }
455 return 0;
456 }
457
458 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
459 {
460 struct jffs2_eraseblock *jeb;
461 struct jffs2_raw_node_ref *next_ref;
462 int blocknr;
463 struct jffs2_unknown_node n;
464 int ret, addedsize;
465 size_t retlen;
466 uint32_t freed_len;
467
468 if(!ref) {
469 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
470 return;
471 }
472 if (ref_obsolete(ref)) {
473 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
474 return;
475 }
476 blocknr = ref->flash_offset / c->sector_size;
477 if (blocknr >= c->nr_blocks) {
478 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
479 BUG();
480 }
481 jeb = &c->blocks[blocknr];
482
483 if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
484 !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
485 /* Hm. This may confuse static lock analysis. If any of the above
486 three conditions is false, we're going to return from this
487 function without actually obliterating any nodes or freeing
488 any jffs2_raw_node_refs. So we don't need to stop erases from
489 happening, or protect against people holding an obsolete
490 jffs2_raw_node_ref without the erase_completion_lock. */
491 down(&c->erase_free_sem);
492 }
493
494 spin_lock(&c->erase_completion_lock);
495
496 freed_len = ref_totlen(c, jeb, ref);
497
498 if (ref_flags(ref) == REF_UNCHECKED) {
499 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
500 printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
501 freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
502 BUG();
503 })
504 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
505 jeb->unchecked_size -= freed_len;
506 c->unchecked_size -= freed_len;
507 } else {
508 D1(if (unlikely(jeb->used_size < freed_len)) {
509 printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
510 freed_len, blocknr, ref->flash_offset, jeb->used_size);
511 BUG();
512 })
513 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
514 jeb->used_size -= freed_len;
515 c->used_size -= freed_len;
516 }
517
518 // Take care that the wasted size is taken into account
519 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
520 D1(printk(KERN_DEBUG "Dirtying\n"));
521 addedsize = freed_len;
522 jeb->dirty_size += freed_len;
523 c->dirty_size += freed_len;
524
525 /* Convert wasted space to dirty, if not a bad block */
526 if (jeb->wasted_size) {
527 if (on_list(&jeb->list, &c->bad_used_list)) {
528 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
529 jeb->offset));
530 addedsize = 0; /* To fool the refiling code later */
531 } else {
532 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
533 jeb->wasted_size, jeb->offset));
534 addedsize += jeb->wasted_size;
535 jeb->dirty_size += jeb->wasted_size;
536 c->dirty_size += jeb->wasted_size;
537 c->wasted_size -= jeb->wasted_size;
538 jeb->wasted_size = 0;
539 }
540 }
541 } else {
542 D1(printk(KERN_DEBUG "Wasting\n"));
543 addedsize = 0;
544 jeb->wasted_size += freed_len;
545 c->wasted_size += freed_len;
546 }
547 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
548
549 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
550 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
551
552 if (c->flags & JFFS2_SB_FLAG_SCANNING) {
553 /* Flash scanning is in progress. Don't muck about with the block
554 lists because they're not ready yet, and don't actually
555 obliterate nodes that look obsolete. If they weren't
556 marked obsolete on the flash at the time they _became_
557 obsolete, there was probably a reason for that. */
558 spin_unlock(&c->erase_completion_lock);
559 /* We didn't lock the erase_free_sem */
560 return;
561 }
562
563 if (jeb == c->nextblock) {
564 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
565 } else if (!jeb->used_size && !jeb->unchecked_size) {
566 if (jeb == c->gcblock) {
567 D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
568 c->gcblock = NULL;
569 } else {
570 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
571 list_del(&jeb->list);
572 }
573 if (jffs2_wbuf_dirty(c)) {
574 D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
575 list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
576 } else {
577 if (jiffies & 127) {
578 /* Most of the time, we just erase it immediately. Otherwise we
579 spend ages scanning it on mount, etc. */
580 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
581 list_add_tail(&jeb->list, &c->erase_pending_list);
582 c->nr_erasing_blocks++;
583 jffs2_erase_pending_trigger(c);
584 } else {
585 /* Sometimes, however, we leave it elsewhere so it doesn't get
586 immediately reused, and we spread the load a bit. */
587 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
588 list_add_tail(&jeb->list, &c->erasable_list);
589 }
590 }
591 D1(printk(KERN_DEBUG "Done OK\n"));
592 } else if (jeb == c->gcblock) {
593 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
594 } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
595 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
596 list_del(&jeb->list);
597 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
598 list_add_tail(&jeb->list, &c->dirty_list);
599 } else if (VERYDIRTY(c, jeb->dirty_size) &&
600 !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
601 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
602 list_del(&jeb->list);
603 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
604 list_add_tail(&jeb->list, &c->very_dirty_list);
605 } else {
606 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
607 jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
608 }
609
610 spin_unlock(&c->erase_completion_lock);
611
612 if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
613 (c->flags & JFFS2_SB_FLAG_BUILDING)) {
614 /* We didn't lock the erase_free_sem */
615 return;
616 }
617
618 /* The erase_free_sem is locked, and has been since before we marked the node obsolete
619 and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
620 the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
621 by jffs2_free_all_node_refs() in erase.c. Which is nice. */
622
623 D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
624 ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
625 if (ret) {
626 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
627 goto out_erase_sem;
628 }
629 if (retlen != sizeof(n)) {
630 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
631 goto out_erase_sem;
632 }
633 if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
634 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
635 goto out_erase_sem;
636 }
637 if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
638 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
639 goto out_erase_sem;
640 }
641 /* XXX FIXME: This is ugly now */
642 n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
643 ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
644 if (ret) {
645 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
646 goto out_erase_sem;
647 }
648 if (retlen != sizeof(n)) {
649 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
650 goto out_erase_sem;
651 }
652
653 /* Nodes which have been marked obsolete no longer need to be
654 associated with any inode. Remove them from the per-inode list.
655
656 Note we can't do this for NAND at the moment because we need
657 obsolete dirent nodes to stay on the lists, because of the
658 horridness in jffs2_garbage_collect_deletion_dirent(). Also
659 because we delete the inocache, and on NAND we need that to
660 stay around until all the nodes are actually erased, in order
661 to stop us from giving the same inode number to another newly
662 created inode. */
663 if (ref->next_in_ino) {
664 struct jffs2_inode_cache *ic;
665 struct jffs2_raw_node_ref **p;
666
667 spin_lock(&c->erase_completion_lock);
668
669 ic = jffs2_raw_ref_to_ic(ref);
670 /* It seems we should never call jffs2_mark_node_obsolete() for
671 XATTR nodes.... yet. Make sure we notice if/when we change
672 that :) */
673 BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
674 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
675 ;
676
677 *p = ref->next_in_ino;
678 ref->next_in_ino = NULL;
679
680 if (ic->nodes == (void *)ic && ic->nlink == 0)
681 jffs2_del_ino_cache(c, ic);
682
683 spin_unlock(&c->erase_completion_lock);
684 }
685
686
687 /* Merge with the next node in the physical list, if there is one
688 and if it's also obsolete and if it doesn't belong to any inode */
689 next_ref = ref_next(ref);
690
691 if (next_ref && ref_obsolete(next_ref) && !next_ref->next_in_ino) {
692 spin_lock(&c->erase_completion_lock);
693
694 #ifdef TEST_TOTLEN
695 ref->__totlen += next_ref->__totlen;
696 #endif
697 ref->next_phys = ref_next(next_ref);
698 if (jeb->last_node == next_ref) jeb->last_node = ref;
699 if (jeb->gc_node == next_ref) {
700 /* gc will be happy continuing gc on this node */
701 jeb->gc_node=ref;
702 }
703 spin_unlock(&c->erase_completion_lock);
704
705 __jffs2_free_raw_node_ref(next_ref);
706 }
707
708 /* Also merge with the previous node in the list, if there is one
709 and that one is obsolete */
710 if (ref != jeb->first_node ) {
711 struct jffs2_raw_node_ref *p = jeb->first_node;
712
713 spin_lock(&c->erase_completion_lock);
714
715 while ((next_ref = ref_next(p)) != ref)
716 p = next_ref;
717
718 if (ref_obsolete(p) && !ref->next_in_ino) {
719 #ifdef TEST_TOTLEN
720 p->__totlen += ref->__totlen;
721 #endif
722 if (jeb->last_node == ref) {
723 jeb->last_node = p;
724 }
725 if (jeb->gc_node == ref) {
726 /* gc will be happy continuing gc on this node */
727 jeb->gc_node=p;
728 }
729 p->next_phys = ref_next(ref);
730 __jffs2_free_raw_node_ref(ref);
731 }
732 spin_unlock(&c->erase_completion_lock);
733 }
734 out_erase_sem:
735 up(&c->erase_free_sem);
736 }
737
738 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
739 {
740 int ret = 0;
741 uint32_t dirty;
742
743 if (c->unchecked_size) {
744 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
745 c->unchecked_size, c->checked_ino));
746 return 1;
747 }
748
749 /* dirty_size includes blocks on the erase_pending_list;
750 * those blocks are also counted in c->nr_erasing_blocks.
751 * Once a block is actually erased it is no longer counted as dirty_space,
752 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
753 * and subtract c->nr_erasing_blocks * c->sector_size again.
754 * Blocks on the erasable_list are counted in dirty_size, but not in c->nr_erasing_blocks.
755 * This helps us to force GC and eventually pick a clean block to spread the load.
756 */
757 dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
758
759 if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
760 (dirty > c->nospc_dirty_size))
761 ret = 1;
762
763 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
764 c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));
765
766 return ret;
767 }