fs/jffs2/fs.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

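/* Write a new (meta)data node reflecting the requested attribute changes,
   then update the in-core inode and obsolete the old metadata node.
   Extending the file writes a hole node; shrinking truncates the fragtree
   and defers truncate_setsize() until f->sem has been dropped. */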
int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

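/* VFS ->setattr() entry point: validate the change, write it out via
   jffs2_do_setattr(), and update the POSIX ACLs if the mode changed. */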
int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = dentry->d_inode;
	int rc;

	rc = inode_change_ok(inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

	return rc;
}

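/* Report free space as dirty + free space, minus the erase blocks JFFS2
   keeps in reserve (c->resv_blocks_write), expressed in PAGE_SIZE units. */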
int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

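/* Look up the inode in the inode cache, or read it from the flash
   nodelists and set up the operations appropriate to its type. */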
struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);

	if (ret) {
		mutex_unlock(&f->sem);
		iget_failed(inode);
		return ERR_PTR(ret);
	}
	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

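		/* fall through: block/char devices continue below and hand
		   the decoded rdev to init_special_inode() */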
	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

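/* VFS ->dirty_inode(): write out a node carrying the current attributes,
   but only when the inode was marked I_DIRTY_DATASYNC; a plain
   I_DIRTY_SYNC marking is left alone. */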
void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

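/* Fill in the superblock: sanity-check the MTD device and its geometry,
   perform the flash-specific write-buffer setup, allocate the inocache
   hash, mount the filesystem and instantiate the root dentry. */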
int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* Do not support the MLC nand */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

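/* Drop the inode reference taken for garbage collection; the counterpart
   of jffs2_gc_fetch_inode() below. */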
void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

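/* Return a kmap()ed pointer to the page cache page covering 'offset',
   reading it in via jffs2_do_readpage_unlock() if necessary.  The struct
   page is stashed in *priv for jffs2_gc_release_page() to unmap and
   release. */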
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page_async(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
				   (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

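/* Per-flash-type setup: NAND, DataFlash, write-buffered NOR ("Sibley")
   and UBI-backed volumes each need their own write-buffer setup here. */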
static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

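/* Tear down whatever jffs2_flash_setup() set up for the flash type in use. */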
void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}