/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: nodemgmt.c,v 1.118 2005/02/27 23:01:32 dwmw2 Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched.h> /* For cond_resched() */
#include "nodelist.h"

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @ofs: Returned value of node offset
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
 * or other error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */
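
/*
 * A minimal usage sketch (illustrative only; 'ri', 'datalen', 'ofs' and
 * 'alloclen' are hypothetical locals, not part of this file):
 *
 *	uint32_t ofs, alloclen;
 *	int ret = jffs2_reserve_space(c, sizeof(ri) + datalen,
 *				      &ofs, &alloclen, ALLOC_NORMAL);
 *	if (ret)
 *		return ret;
 *	... write the node to flash at 'ofs', report it with
 *	jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);
 */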

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret;
			uint32_t dirty, avail;

			/* Calculate the real dirty size.
			 * dirty_size contains blocks on the erase_pending_list;
			 * those blocks are counted in c->nr_erasing_blocks.
			 * Once a block is actually erased it is no longer counted as
			 * dirty_space, but it is still counted in c->nr_erasing_blocks,
			 * so we add erasing_size and subtract
			 * c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on the erasable_list are counted as dirty_size, but
			 * not in c->nr_erasing_blocks. This helps us to force gc and
			 * eventually pick a clean block, to spread the load.
			 * We add unchecked_size here, as we hopefully will find some
			 * space to use. This affects the sum only once, as gc first
			 * finishes the checking of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
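			/* Worked example for the calculation above (assumed numbers,
			 * for illustration only): with sector_size 0x10000,
			 * dirty_size 0x18000, two blocks being erased
			 * (erasing_size 0x20000, nr_erasing_blocks 2) and
			 * unchecked_size 0, dirty = 0x18000 + 0x20000 - 0x20000 + 0
			 * = 0x18000; space already being reclaimed doesn't count.
			 */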
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n");
					break;
				}
				D1(printk(KERN_DEBUG "dirty size 0x%08x (including unchecked_size 0x%08x) < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size, c->nospc_dirty_size));

				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calculate the maximum possibly available space. "Possibly
			 * available" means that we don't know whether the unchecked
			 * size contains obsoleted nodes, which could give us some
			 * more usable space. This affects the sum only once, as gc
			 * first finishes the checking of nodes.
			 * Return -ENOSPC if the maximum possibly available space is
			 * less than or equal to blocksneeded * sector_size.
			 * This prevents endless gc looping on a filesystem which is
			 * nearly full, even if the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
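			/* For illustration (assumed numbers): with sector_size
			 * 0x10000 and blocksneeded 5, avail = 0x40000 gives
			 * avail / sector_size = 4 <= 5, so the check below
			 * returns -ENOSPC.
			 */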
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n");
					break;
				}

				D1(printk(KERN_DEBUG "max. available size 0x%08x <= blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				up(&c->alloc_sem);
				return -ENOSPC;
			}

			up(&c->alloc_sem);

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	if (ret)
		up(&c->alloc_sem);
	return ret;
}

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret = -EAGAIN;
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	while (ret == -EAGAIN) {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	return ret;
}

/* Called with alloc sem _and_ erase_completion_lock */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (jffs2_wbuf_dirty(c)) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			jeb = c->nextblock;
			goto restart;
		}
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check whether we now have a dirty block, or whether it was dirty already */
		if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
					  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}

			spin_unlock(&c->erase_completion_lock);
			/* Don't wait for it; just erase one right now */
			jffs2_erase_pending_blocks(c, 1);
			spin_lock(&c->erase_completion_lock);

			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* The only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero, but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @new: new node reference to add
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space(); the node's length and dirty state are derived
 * from the reference itself.
 *
 * Must be called with the alloc_sem held.
 */
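
/*
 * A rough sketch of the expected call sequence (illustrative only; 'ofs'
 * and 'totlen' are hypothetical locals from the caller's write path, and
 * error handling is elided):
 *
 *	struct jffs2_raw_node_ref *raw = jffs2_alloc_raw_node_ref();
 *	raw->flash_offset = ofs | REF_NORMAL;	// low bits carry ref_flags()
 *	raw->__totlen = PAD(totlen);
 *	raw->next_phys = NULL;
 *	if (jffs2_add_physical_node_ref(c, raw))
 *		... the ref was rejected and has already been freed ...
 */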

int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len;

	jeb = &c->blocks[new->flash_offset / c->sector_size];
	len = ref_totlen(c, jeb, new);

	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* We could get some obsolete nodes after nextblock was refiled
	   in wbuf.c */
	if ((c->nextblock || !ref_obsolete(new)) &&
	    (jeb != c->nextblock || ref_offset(new) != jeb->offset + (c->sector_size - jeb->free_size))) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c, jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}

void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			D1(printk("%p is on list at %p\n", obj, head));
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;

	if (!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & JFFS2_SB_FLAG_MOUNTING)) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or to protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		down(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->unchecked_size -= ref_totlen(c, jeb, ref);
		c->unchecked_size -= ref_totlen(c, jeb, ref);
	} else {
		D1(if (unlikely(jeb->used_size < ref_totlen(c, jeb, ref))) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref_totlen(c, jeb, ref), blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref_totlen(c, jeb, ref)));
		jeb->used_size -= ref_totlen(c, jeb, ref);
		c->used_size -= ref_totlen(c, jeb, ref);
	}

	/* Take care that the wasted size is accounted for as well */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref_totlen(c, jeb, ref))) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		addedsize = ref_totlen(c, jeb, ref);
		jeb->dirty_size += ref_totlen(c, jeb, ref);
		c->dirty_size += ref_totlen(c, jeb, ref);

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset));
				addedsize = 0; /* To fool the refiling code later */
			} else {
				D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset));
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		D1(printk("Wasting\n"));
		addedsize = 0;
		jeb->wasted_size += ref_totlen(c, jeb, ref);
		c->wasted_size += ref_totlen(c, jeb, ref);
	}
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
		/* Mount in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_all_node_refs() in erase.c. Which is nice. */

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref_totlen(c, jeb, ref))) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref_totlen(c, jeb, ref));
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		goto out_erase_sem;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		if (ic->nodes == (void *)ic)
			jffs2_del_ino_cache(c, ic);

		spin_unlock(&c->erase_completion_lock);
	}


	/* Merge with the next node in the physical list, if there is one,
	   if it's also obsolete, and if it doesn't belong to any inode */
	if (ref->next_phys && ref_obsolete(ref->next_phys) &&
	    !ref->next_phys->next_in_ino) {
		struct jffs2_raw_node_ref *n = ref->next_phys;

		spin_lock(&c->erase_completion_lock);

		ref->__totlen += n->__totlen;
		ref->next_phys = n->next_phys;
		if (jeb->last_node == n)
			jeb->last_node = ref;
		if (jeb->gc_node == n) {
			/* gc will be happy continuing gc on this node */
			jeb->gc_node = ref;
		}
		spin_unlock(&c->erase_completion_lock);

		jffs2_free_raw_node_ref(n);
	}

	/* Also merge with the previous node in the list, if there is one
	   and that one is obsolete */
	if (ref != jeb->first_node) {
		struct jffs2_raw_node_ref *p = jeb->first_node;

		spin_lock(&c->erase_completion_lock);

		while (p->next_phys != ref)
			p = p->next_phys;

		if (ref_obsolete(p) && !ref->next_in_ino) {
			p->__totlen += ref->__totlen;
			if (jeb->last_node == ref) {
				jeb->last_node = p;
			}
			if (jeb->gc_node == ref) {
				/* gc will be happy continuing gc on this node */
				jeb->gc_node = p;
			}
			p->next_phys = ref->next_phys;
			jffs2_free_raw_node_ref(ref);
		}
		spin_unlock(&c->erase_completion_lock);
	}
 out_erase_sem:
	up(&c->erase_free_sem);
}

#if CONFIG_JFFS2_FS_DEBUG >= 2
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
{
	printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
	printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
	printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
	printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
	printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
	printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
	printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
	printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
	printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
	printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
	printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n", c->sector_size * c->resv_blocks_write);

	if (c->nextblock) {
		printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
	} else {
		printk(KERN_DEBUG "nextblock: NULL\n");
	}
	if (c->gcblock) {
		printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
		       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
	} else {
		printk(KERN_DEBUG "gcblock: NULL\n");
	}
	if (list_empty(&c->clean_list)) {
		printk(KERN_DEBUG "clean_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->clean_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->wasted_size;
			printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->very_dirty_list)) {
		printk(KERN_DEBUG "very_dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->very_dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
		       numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->dirty_list)) {
		printk(KERN_DEBUG "dirty_list: empty\n");
	} else {
		struct list_head *this;
		int numblocks = 0;
		uint32_t dirty = 0;

		list_for_each(this, &c->dirty_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			numblocks++;
			dirty += jeb->dirty_size;
			printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
		printk(KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
		       numblocks, dirty, dirty / numblocks);
	}
	if (list_empty(&c->erasable_list)) {
		printk(KERN_DEBUG "erasable_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasing_list)) {
		printk(KERN_DEBUG "erasing_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasing_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erase_pending_list)) {
		printk(KERN_DEBUG "erase_pending_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erase_pending_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->erasable_pending_wbuf_list)) {
		printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->erasable_pending_wbuf_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "erasable_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->free_list)) {
		printk(KERN_DEBUG "free_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->free_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_list)) {
		printk(KERN_DEBUG "bad_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
	if (list_empty(&c->bad_used_list)) {
		printk(KERN_DEBUG "bad_used_list: empty\n");
	} else {
		struct list_head *this;

		list_for_each(this, &c->bad_used_list) {
			struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
			printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
			       jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
		}
	}
}
#endif /* CONFIG_JFFS2_FS_DEBUG */

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;

	if (c->unchecked_size) {
		D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
			  c->unchecked_size, c->checked_ino));
		return 1;
	}

	/* dirty_size contains blocks on the erase_pending_list;
	 * those blocks are counted in c->nr_erasing_blocks.
	 * Once a block is actually erased it is no longer counted as
	 * dirty_space, but it is still counted in c->nr_erasing_blocks,
	 * so we add erasing_size and subtract
	 * c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on the erasable_list are counted as dirty_size, but not
	 * in c->nr_erasing_blocks. This helps us to force gc and
	 * eventually pick a clean block, to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;
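	/* Worked example (assumed numbers, for illustration only): with
	 * resv_blocks_gctrigger 5, nr_free_blocks 3, nr_erasing_blocks 1
	 * and dirty above nospc_dirty_size, 3 + 1 < 5 holds, so the GC
	 * thread is woken. */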

	D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x: %s\n",
		  c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, ret?"yes":"no"));

	return ret;
}