jffs2: Convert printks to pr_<level>
fs/jffs2/nodemgmt.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"
#include "debug.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or another
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

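/*
 * Typical caller pattern (an illustrative sketch only; see the real write
 * paths in write.c and gc.c for the authoritative sequence — 'ri' and
 * 'datalen' below are hypothetical caller-side variables):
 *
 *	uint32_t alloclen;
 *	int ret = jffs2_reserve_space(c, sizeof(ri) + datalen, &alloclen,
 *				      ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node to flash ...
 *	... report it with jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);
 */
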
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while(ret == -EAGAIN) {
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
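			/* Worked example with hypothetical numbers: if
			 * sector_size is 0x10000, dirty_size is 0x30000,
			 * erasing_size is 0x20000 and nr_erasing_blocks is 2,
			 * then dirty = 0x30000 + 0x20000 - 2*0x10000 +
			 * unchecked_size: the two erasing blocks cancel out,
			 * so only genuinely reclaimable space is counted.
			 */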
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know whether unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
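/* Like jffs2_reserve_space(), but called from within the GC pass itself:
   it must not recurse into garbage collection, so it simply loops on
   jffs2_do_reserve_space() and never takes c->alloc_sem. */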
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	spin_lock(&c->erase_completion_lock);
	while(ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}

/* Classify nextblock (clean, dirty or verydirty) and force the selection of another one */

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d , "
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do we have to
		   write out summary information now, close this jeb and select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_write_sumnode() couldn't write out the summary information,
				   disabling summary for this jeb and freeing the collected information
				 */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
			if (ret)
				return ret;
			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical flash offset of the new node, with REF_ flag bits in the low two bits
 * @len: length of this physical node
 * @ic: inode cache the node belongs to, or NULL
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

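/*
 * Illustrative call (a sketch, not copied verbatim from a real caller):
 * after writing a node at 'flash_ofs', a write path reports it with e.g.
 *
 *	new = jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
 *					  PAD(totlen), f->inocache);
 *
 * The low two bits of the offset carry the REF_ state (REF_OBSOLETE,
 * REF_UNCHECKED, REF_NORMAL or REF_PRISTINE); 'flash_ofs', 'totlen' and
 * 'f' are hypothetical caller-side names.
 */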
struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

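/*
 * Mark a node obsolete: its length is moved from used (or unchecked)
 * accounting into dirty or wasted space, the eraseblock is refiled onto
 * the appropriate list, and, when the medium permits marking in place
 * (jffs2_can_mark_obsolete() outside of scan/build), the ACCURATE bit
 * in the node header is also cleared on the flash itself.
 */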
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}