/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
			 * and subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
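			/* A worked example with purely illustrative numbers: if
			 * sector_size is 0x10000, two blocks are mid-erase
			 * (erasing_size 0x20000, nr_erasing_blocks 2), dirty_size
			 * is 0x5000 and unchecked_size is 0x1000, then
			 * dirty = 0x5000 + 0x20000 - 2 * 0x10000 + 0x1000 = 0x6000:
			 * the in-flight erases cancel out, and only genuinely
			 * reclaimable space is counted.
			 */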
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether the unchecked size contains obsoleted nodes, which
			 * could give us some more usable space. This will affect the sum only
			 * once, as gc first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than
			 * or equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full,
			 * even if the check above passes.
			 */
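			/* Illustrative numbers only: with 16 eraseblocks' worth of
			 * free + dirty + erasing + unchecked space and
			 * blocksneeded = 16, avail / sector_size == 16 <= 16, so we
			 * give up immediately rather than GC forever chasing space
			 * that can never materialize.
			 */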
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
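
/*
 * A minimal usage sketch (not part of the original file): how a writer is
 * expected to pair jffs2_reserve_space() with jffs2_complete_reservation().
 * The function name and the 'nodesize' parameter are hypothetical; the real
 * callers live in write.c and wbuf.c.
 */
#if 0
static int example_reserve_and_release(struct jffs2_sb_info *c, uint32_t nodesize)
{
	uint32_t alloclen;
	int ret;

	/* On success this returns with c->alloc_sem held and alloclen set
	   to the usable space at the current write point. */
	ret = jffs2_reserve_space(c, nodesize, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_NOSUM_SIZE);
	if (ret)
		return ret;

	/* ... write the node to flash and register it with
	   jffs2_add_physical_node_ref() here ... */

	/* Drops c->alloc_sem and triggers the GC thread if needed. */
	jffs2_complete_reservation(c);
	return 0;
}
#endif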

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}


/* Classify nextblock (clean, dirty or verydirty) and force the caller to select another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
			  jeb->offset));
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
				  ejeb->offset));
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

	D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;		/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the summary
				   information. Disable summary for this jeb and free the
				   collected information. */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep always valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
		  *len, jeb->offset + (c->sector_size - jeb->free_size)));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: flash offset of the new node, with the REF_* state in the low two bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
		  ofs & ~3, ofs & 3, len));
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}
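
/*
 * Illustrative sketch only: registering a freshly written node. The function
 * name and the 'flash_ofs', 'totlen' and 'f' parameters are hypothetical;
 * the real call sites are in write.c and wbuf.c. Note how the REF_* state is
 * OR'd into the low bits of the offset, which jffs2_add_physical_node_ref()
 * unpacks with 'ofs & 3'.
 */
#if 0
static int example_register_node(struct jffs2_sb_info *c,
				 struct jffs2_inode_info *f,
				 uint32_t flash_ofs, uint32_t totlen)
{
	struct jffs2_raw_node_ref *ref;

	ref = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
					  PAD(totlen), f->inocache);
	if (IS_ERR(ref))
		return PTR_ERR(ref);
	return 0;
}
#endif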


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;

		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	up(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is still counted in c->nr_erasing_blocks, so we add erasing_size
	 * and subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));

	return ret;
}
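
/*
 * Illustrative sketch only (assumed, not from this file): the background GC
 * thread in background.c polls jffs2_thread_should_wake() to decide whether
 * to sleep or run another jffs2_garbage_collect_pass(). Roughly:
 */
#if 0
static void example_gc_thread_loop(struct jffs2_sb_info *c)
{
	for (;;) {
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			schedule();		/* sleep until woken */
		} else
			spin_unlock(&c->erase_completion_lock);

		if (jffs2_garbage_collect_pass(c) == -ENOSPC)
			break;			/* nothing left to collect */
	}
}
#endif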