jffs2: Convert most D1/D2 macros to jffs2_dbg
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If a block is actually erased, it is no longer counted as dirty_space,
			 * but it is still counted in c->nr_erasing_blocks, so we add it in and then
			 * subtract c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
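			/* A worked example with hypothetical numbers: on a flash
			 * with 64KiB (0x10000) sectors, suppose dirty_size =
			 * 0x30000, erasing_size = 0x20000, nr_erasing_blocks = 2
			 * and unchecked_size = 0x1000. Then dirty =
			 * 0x30000 + 0x20000 - 2 * 0x10000 + 0x1000 = 0x31000:
			 * the two blocks being erased cancel out of the sum,
			 * leaving genuinely reclaimable plus unchecked space. */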
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know if the unchecked size contains obsoleted nodes, which could
			 * give us some more usable space. This will affect the sum only once,
			 * as gc first finishes checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than
			 * or equal to blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full,
			 * even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ( (avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					/* detach the on-stack wait entry before it
					   goes out of scope */
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
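
/* A typical caller pattern, as a sketch (not verbatim from any one caller;
 * 'write_len' is a hypothetical size):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, write_len, &alloclen, ALLOC_NORMAL,
 *				      JFFS2_SUMMARY_NOSUM_SIZE);
 *	if (ret)
 *		return ret;
 *	... write at most alloclen bytes, then report the new node with
 *	jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);	// drops c->alloc_sem
 */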

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
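
/* Note: unlike jffs2_reserve_space(), the GC variant above neither takes nor
 * releases c->alloc_sem; the caller (the garbage collector) is assumed to
 * hold it already, since jffs2_do_reserve_space() is documented below as
 * requiring both the alloc sem and the erase_completion_lock. */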


/* Classify nextblock (clean, dirty or verydirty) and force selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
			!list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
			       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
			       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we
		   have to write out the summary information now, close this jeb and
		   select a new nextblock? */
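		/* Example with hypothetical numbers: if minsize = 0x100,
		   jeb->free_size = 0x200, the collected sum_size = 0x120 and
		   sumsize = 0x40, then PAD(0x100) + PAD(0x120 + 0x40 +
		   JFFS2_SUMMARY_FRAME_SIZE) already exceeds 0x200, so the
		   summary is flushed and this jeb gets closed. */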
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information: disable summary for this jeb
				   and free the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* keep always valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
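			/* The low two bits of a ref's flash_offset carry its
			   REF_* state, which is why the offset is ORed with
			   REF_OBSOLETE here and masked with ~3 elsewhere in
			   this file (e.g. "ofs & ~3, ofs & 3" below). */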
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical offset of the node, with its REF_* state in the low two bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, if any
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
		if (c->nextblock)
			printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
		else
			printk(KERN_WARNING "No nextblock");
		printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if(unlikely(!ref)) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       freed_len, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	// Take care that the wasted size is taken into account
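	/* i.e. an obsoleted node turns into "dirty" space (worth garbage
	   collecting) only if the block already has dirty space, or if the
	   freed length plus existing wasted space crosses the ISDIRTY
	   threshold; otherwise it is merely "wasted". c->nextblock is never
	   refiled here, since we are still writing to it. */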
1417fc44 585 if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
9c261b33 586 jffs2_dbg(1, "Dirtying\n");
1417fc44
DW
587 addedsize = freed_len;
588 jeb->dirty_size += freed_len;
589 c->dirty_size += freed_len;
1da177e4
LT
590
591 /* Convert wasted space to dirty, if not a bad block */
592 if (jeb->wasted_size) {
593 if (on_list(&jeb->list, &c->bad_used_list)) {
9c261b33
JP
594 jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
595 jeb->offset);
1da177e4
LT
596 addedsize = 0; /* To fool the refiling code later */
597 } else {
9c261b33
JP
598 jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
599 jeb->wasted_size, jeb->offset);
1da177e4
LT
600 addedsize += jeb->wasted_size;
601 jeb->dirty_size += jeb->wasted_size;
602 c->dirty_size += jeb->wasted_size;
603 c->wasted_size -= jeb->wasted_size;
604 jeb->wasted_size = 0;
605 }
606 }
607 } else {
9c261b33 608 jffs2_dbg(1, "Wasting\n");
1da177e4 609 addedsize = 0;
1417fc44
DW
610 jeb->wasted_size += freed_len;
611 c->wasted_size += freed_len;
1da177e4
LT
612 }
613 ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
182ec4ee 614
e0c8e42f
AB
615 jffs2_dbg_acct_sanity_check_nolock(c, jeb);
616 jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
1da177e4 617
	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
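				/* jiffies & 127 is non-zero for 127 out of every
				   128 ticks, so roughly one obsoleted block in 128
				   is parked on the erasable_list below instead of
				   being queued for immediate erase. */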
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If a block is actually erased, it is no longer counted as dirty_space,
	 * but it is still counted in c->nr_erasing_blocks, so we add it in and then
	 * subtract c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
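	/* (This is the same accounting as in jffs2_reserve_space() above, minus
	   the unchecked_size term; see the worked example there.) */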
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}