fs/jffs2/fs.c
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/vfs.h>
#include <linux/crc32.h>
#include "nodelist.h"

static int jffs2_flash_setup(struct jffs2_sb_info *c);

int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
{
	struct jffs2_full_dnode *old_metadata, *new_metadata;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	union jffs2_device_node dev;
	unsigned char *mdata = NULL;
	int mdatalen = 0;
	unsigned int ivalid;
	uint32_t alloclen;
	int ret;
	int alloc_type = ALLOC_NORMAL;

	jffs2_dbg(1, "%s(): ino #%lu\n", __func__, inode->i_ino);

	/* Special cases - we don't want more than one data node
	   for these types on the medium at any time. So setattr
	   must read the original data associated with the node
	   (i.e. the device numbers or the target name) and write
	   it out again with the appropriate data attached */
	if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, inode->i_rdev);
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(inode->i_mode)) {
		mutex_lock(&f->sem);
		mdatalen = f->metadata->size;
		mdata = kmalloc(f->metadata->size, GFP_USER);
		if (!mdata) {
			mutex_unlock(&f->sem);
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, f->metadata, mdata, 0, mdatalen);
		if (ret) {
			mutex_unlock(&f->sem);
			kfree(mdata);
			return ret;
		}
		mutex_unlock(&f->sem);
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ri = jffs2_alloc_raw_inode();
	if (!ri) {
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return -ENOMEM;
	}

	ret = jffs2_reserve_space(c, sizeof(*ri) + mdatalen, &alloclen,
				  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		jffs2_free_raw_inode(ri);
		if (S_ISLNK(inode->i_mode))
			kfree(mdata);
		return ret;
	}
	mutex_lock(&f->sem);
	ivalid = iattr->ia_valid;

	ri->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri->nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri->totlen = cpu_to_je32(sizeof(*ri) + mdatalen);
	ri->hdr_crc = cpu_to_je32(crc32(0, ri, sizeof(struct jffs2_unknown_node)-4));

	ri->ino = cpu_to_je32(inode->i_ino);
	ri->version = cpu_to_je32(++f->highest_version);

	ri->uid = cpu_to_je16((ivalid & ATTR_UID)?
		from_kuid(&init_user_ns, iattr->ia_uid):i_uid_read(inode));
	ri->gid = cpu_to_je16((ivalid & ATTR_GID)?
		from_kgid(&init_user_ns, iattr->ia_gid):i_gid_read(inode));

	if (ivalid & ATTR_MODE)
		ri->mode = cpu_to_jemode(iattr->ia_mode);
	else
		ri->mode = cpu_to_jemode(inode->i_mode);


	ri->isize = cpu_to_je32((ivalid & ATTR_SIZE)?iattr->ia_size:inode->i_size);
	ri->atime = cpu_to_je32(I_SEC((ivalid & ATTR_ATIME)?iattr->ia_atime:inode->i_atime));
	ri->mtime = cpu_to_je32(I_SEC((ivalid & ATTR_MTIME)?iattr->ia_mtime:inode->i_mtime));
	ri->ctime = cpu_to_je32(I_SEC((ivalid & ATTR_CTIME)?iattr->ia_ctime:inode->i_ctime));

	ri->offset = cpu_to_je32(0);
	ri->csize = ri->dsize = cpu_to_je32(mdatalen);
	ri->compr = JFFS2_COMPR_NONE;
	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		/* It's an extension. Make it a hole node */
		ri->compr = JFFS2_COMPR_ZERO;
		ri->dsize = cpu_to_je32(iattr->ia_size - inode->i_size);
		ri->offset = cpu_to_je32(inode->i_size);
	} else if (ivalid & ATTR_SIZE && !iattr->ia_size) {
		/* For truncate-to-zero, treat it as deletion because
		   it'll always be obsoleting all previous nodes */
		alloc_type = ALLOC_DELETION;
	}
	ri->node_crc = cpu_to_je32(crc32(0, ri, sizeof(*ri)-8));
	if (mdatalen)
		ri->data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));
	else
		ri->data_crc = cpu_to_je32(0);

	new_metadata = jffs2_write_dnode(c, f, ri, mdata, mdatalen, alloc_type);
	if (S_ISLNK(inode->i_mode))
		kfree(mdata);

	if (IS_ERR(new_metadata)) {
		jffs2_complete_reservation(c);
		jffs2_free_raw_inode(ri);
		mutex_unlock(&f->sem);
		return PTR_ERR(new_metadata);
	}
	/* It worked. Update the inode */
	inode->i_atime = ITIME(je32_to_cpu(ri->atime));
	inode->i_ctime = ITIME(je32_to_cpu(ri->ctime));
	inode->i_mtime = ITIME(je32_to_cpu(ri->mtime));
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_uid_write(inode, je16_to_cpu(ri->uid));
	i_gid_write(inode, je16_to_cpu(ri->gid));


	old_metadata = f->metadata;

	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size)
		jffs2_truncate_fragtree (c, &f->fragtree, iattr->ia_size);

	if (ivalid & ATTR_SIZE && inode->i_size < iattr->ia_size) {
		jffs2_add_full_dnode_to_inode(c, f, new_metadata);
		inode->i_size = iattr->ia_size;
		inode->i_blocks = (inode->i_size + 511) >> 9;
		f->metadata = NULL;
	} else {
		f->metadata = new_metadata;
	}
	if (old_metadata) {
		jffs2_mark_node_obsolete(c, old_metadata->raw);
		jffs2_free_full_dnode(old_metadata);
	}
	jffs2_free_raw_inode(ri);

	mutex_unlock(&f->sem);
	jffs2_complete_reservation(c);

	/* We have to do the truncate_setsize() without f->sem held, since
	   some pages may be locked and waiting for it in readpage().
	   We are protected from a simultaneous write() extending i_size
	   back past iattr->ia_size, because do_truncate() holds the
	   generic inode semaphore. */
	if (ivalid & ATTR_SIZE && inode->i_size > iattr->ia_size) {
		truncate_setsize(inode, iattr->ia_size);
		inode->i_blocks = (inode->i_size + 511) >> 9;
	}

	return 0;
}

int jffs2_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct inode *inode = d_inode(dentry);
	int rc;

	rc = inode_change_ok(inode, iattr);
	if (rc)
		return rc;

	rc = jffs2_do_setattr(inode, iattr);
	if (!rc && (iattr->ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(inode, inode->i_mode);

	return rc;
}

int jffs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(dentry->d_sb);
	unsigned long avail;

	buf->f_type = JFFS2_SUPER_MAGIC;
	buf->f_bsize = 1 << PAGE_SHIFT;
	buf->f_blocks = c->flash_size >> PAGE_SHIFT;
	buf->f_files = 0;
	buf->f_ffree = 0;
	buf->f_namelen = JFFS2_MAX_NAME_LEN;
	buf->f_fsid.val[0] = JFFS2_SUPER_MAGIC;
	buf->f_fsid.val[1] = c->mtd->index;

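	/* Report dirty + free space, minus the blocks kept in reserve
	   (resv_blocks_write), so userspace never sees space that
	   ordinary writes cannot actually use. */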
	spin_lock(&c->erase_completion_lock);
	avail = c->dirty_size + c->free_size;
	if (avail > c->sector_size * c->resv_blocks_write)
		avail -= c->sector_size * c->resv_blocks_write;
	else
		avail = 0;
	spin_unlock(&c->erase_completion_lock);

	buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT;

	return 0;
}


void jffs2_evict_inode (struct inode *inode)
{
	/* We can forget about this inode for now - drop all
	 * the nodelists associated with it, etc.
	 */
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);

	jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
		  __func__, inode->i_ino, inode->i_mode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	jffs2_do_clear_inode(c, f);
}

struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
{
	struct jffs2_inode_info *f;
	struct jffs2_sb_info *c;
	struct jffs2_raw_inode latest_node;
	union jffs2_device_node jdev;
	struct inode *inode;
	dev_t rdev = 0;
	int ret;

	jffs2_dbg(1, "%s(): ino == %lu\n", __func__, ino);

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	f = JFFS2_INODE_INFO(inode);
	c = JFFS2_SB_INFO(inode->i_sb);

	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	ret = jffs2_do_read_inode(c, f, inode->i_ino, &latest_node);
	if (ret)
		goto error;

	inode->i_mode = jemode_to_cpu(latest_node.mode);
	i_uid_write(inode, je16_to_cpu(latest_node.uid));
	i_gid_write(inode, je16_to_cpu(latest_node.gid));
	inode->i_size = je32_to_cpu(latest_node.isize);
	inode->i_atime = ITIME(je32_to_cpu(latest_node.atime));
	inode->i_mtime = ITIME(je32_to_cpu(latest_node.mtime));
	inode->i_ctime = ITIME(je32_to_cpu(latest_node.ctime));

	set_nlink(inode, f->inocache->pino_nlink);

	inode->i_blocks = (inode->i_size + 511) >> 9;

	switch (inode->i_mode & S_IFMT) {

	case S_IFLNK:
		inode->i_op = &jffs2_symlink_inode_operations;
		inode->i_link = f->target;
		break;

	case S_IFDIR:
	{
		struct jffs2_full_dirent *fd;
		set_nlink(inode, 2); /* parent and '.' */

		for (fd=f->dents; fd; fd = fd->next) {
			if (fd->type == DT_DIR && fd->ino)
				inc_nlink(inode);
		}
		/* Root dir gets i_nlink 3 for some reason */
		if (inode->i_ino == 1)
			inc_nlink(inode);

		inode->i_op = &jffs2_dir_inode_operations;
		inode->i_fop = &jffs2_dir_operations;
		break;
	}
	case S_IFREG:
		inode->i_op = &jffs2_file_inode_operations;
		inode->i_fop = &jffs2_file_operations;
		inode->i_mapping->a_ops = &jffs2_file_address_operations;
		inode->i_mapping->nrpages = 0;
		break;

	case S_IFBLK:
	case S_IFCHR:
		/* Read the device numbers from the media */
		if (f->metadata->size != sizeof(jdev.old_id) &&
		    f->metadata->size != sizeof(jdev.new_id)) {
			pr_notice("Device node has strange size %d\n",
				  f->metadata->size);
			goto error_io;
		}
		jffs2_dbg(1, "Reading device numbers from flash\n");
		ret = jffs2_read_dnode(c, f, f->metadata, (char *)&jdev, 0, f->metadata->size);
		if (ret < 0) {
			/* Eep */
			pr_notice("Read device numbers for inode %lu failed\n",
				  (unsigned long)inode->i_ino);
			goto error;
		}
		if (f->metadata->size == sizeof(jdev.old_id))
			rdev = old_decode_dev(je16_to_cpu(jdev.old_id));
		else
			rdev = new_decode_dev(je32_to_cpu(jdev.new_id));

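		/* fall through - block/char devices share the special-inode
		   setup below once rdev has been decoded */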
	case S_IFSOCK:
	case S_IFIFO:
		inode->i_op = &jffs2_file_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;

	default:
		pr_warn("%s(): Bogus i_mode %o for ino %lu\n",
			__func__, inode->i_mode, (unsigned long)inode->i_ino);
	}

	mutex_unlock(&f->sem);

	jffs2_dbg(1, "jffs2_read_inode() returning\n");
	unlock_new_inode(inode);
	return inode;

error_io:
	ret = -EIO;
error:
	mutex_unlock(&f->sem);
	jffs2_do_clear_inode(c, f);
	iget_failed(inode);
	return ERR_PTR(ret);
}

void jffs2_dirty_inode(struct inode *inode, int flags)
{
	struct iattr iattr;

	if (!(inode->i_state & I_DIRTY_DATASYNC)) {
		jffs2_dbg(2, "%s(): not calling setattr() for ino #%lu\n",
			  __func__, inode->i_ino);
		return;
	}

	jffs2_dbg(1, "%s(): calling setattr() for ino #%lu\n",
		  __func__, inode->i_ino);

	iattr.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_MTIME|ATTR_CTIME;
	iattr.ia_mode = inode->i_mode;
	iattr.ia_uid = inode->i_uid;
	iattr.ia_gid = inode->i_gid;
	iattr.ia_atime = inode->i_atime;
	iattr.ia_mtime = inode->i_mtime;
	iattr.ia_ctime = inode->i_ctime;

	jffs2_do_setattr(inode, &iattr);
}

int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

	if (c->flags & JFFS2_SB_FLAG_RO && !(sb->s_flags & MS_RDONLY))
		return -EROFS;

	/* We stop if it was running, then restart if it needs to.
	   This also catches the case where it was stopped and this
	   is just a remount to restart it.
	   Flush the writebuffer, if necessary, else we lose it */
	if (!(sb->s_flags & MS_RDONLY)) {
		jffs2_stop_garbage_collect_thread(c);
		mutex_lock(&c->alloc_sem);
		jffs2_flush_wbuf_pad(c);
		mutex_unlock(&c->alloc_sem);
	}

	if (!(*flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);

	*flags |= MS_NOATIME;
	return 0;
}

/* jffs2_new_inode: allocate a new inode and inocache, add it to the hash,
   fill in the raw_inode while you're at it. */
struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode, struct jffs2_raw_inode *ri)
{
	struct inode *inode;
	struct super_block *sb = dir_i->i_sb;
	struct jffs2_sb_info *c;
	struct jffs2_inode_info *f;
	int ret;

	jffs2_dbg(1, "%s(): dir_i %ld, mode 0x%x\n",
		  __func__, dir_i->i_ino, mode);

	c = JFFS2_SB_INFO(sb);

	inode = new_inode(sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	f = JFFS2_INODE_INFO(inode);
	jffs2_init_inode_info(f);
	mutex_lock(&f->sem);

	memset(ri, 0, sizeof(*ri));
	/* Set OS-specific defaults for new inodes */
	ri->uid = cpu_to_je16(from_kuid(&init_user_ns, current_fsuid()));

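	/* A setgid parent directory propagates its group to the new inode
	   (and the setgid bit itself to new directories); otherwise the
	   creator's fsgid is used. */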
	if (dir_i->i_mode & S_ISGID) {
		ri->gid = cpu_to_je16(i_gid_read(dir_i));
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else {
		ri->gid = cpu_to_je16(from_kgid(&init_user_ns, current_fsgid()));
	}

	/* POSIX ACLs have to be processed now, at least partly.
	   The umask is only applied if there's no default ACL */
	ret = jffs2_init_acl_pre(dir_i, inode, &mode);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	ret = jffs2_do_new_inode (c, f, mode, ri);
	if (ret) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(ret);
	}
	set_nlink(inode, 1);
	inode->i_ino = je32_to_cpu(ri->ino);
	inode->i_mode = jemode_to_cpu(ri->mode);
	i_gid_write(inode, je16_to_cpu(ri->gid));
	i_uid_write(inode, je16_to_cpu(ri->uid));
	inode->i_atime = inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	ri->atime = ri->mtime = ri->ctime = cpu_to_je32(I_SEC(inode->i_mtime));

	inode->i_blocks = 0;
	inode->i_size = 0;

	if (insert_inode_locked(inode) < 0) {
		mutex_unlock(&f->sem);
		make_bad_inode(inode);
		iput(inode);
		return ERR_PTR(-EINVAL);
	}

	return inode;
}

static int calculate_inocache_hashsize(uint32_t flash_size)
{
	/*
	 * Pick an inocache hash size based on the size of the medium.
	 * Count how many megabytes we're dealing with, apply a hashsize twice
	 * that size, but rounding down to the usual big powers of 2. And keep
	 * to sensible bounds.
	 */

	int size_mb = flash_size / 1024 / 1024;
	int hashsize = (size_mb * 2) & ~0x3f;

	if (hashsize < INOCACHE_HASHSIZE_MIN)
		return INOCACHE_HASHSIZE_MIN;
	if (hashsize > INOCACHE_HASHSIZE_MAX)
		return INOCACHE_HASHSIZE_MAX;

	return hashsize;
}

int jffs2_do_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jffs2_sb_info *c;
	struct inode *root_i;
	int ret;
	size_t blocks;

	c = JFFS2_SB_INFO(sb);

	/* Do not support the MLC nand */
	if (c->mtd->type == MTD_MLCNANDFLASH)
		return -EINVAL;

#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
	if (c->mtd->type == MTD_NANDFLASH) {
		pr_err("Cannot operate on NAND flash unless jffs2 NAND support is compiled in\n");
		return -EINVAL;
	}
	if (c->mtd->type == MTD_DATAFLASH) {
		pr_err("Cannot operate on DataFlash unless jffs2 DataFlash support is compiled in\n");
		return -EINVAL;
	}
#endif

	c->flash_size = c->mtd->size;
	c->sector_size = c->mtd->erasesize;
	blocks = c->flash_size / c->sector_size;

	/*
	 * Size alignment check
	 */
	if ((c->sector_size * blocks) != c->flash_size) {
		c->flash_size = c->sector_size * blocks;
		pr_info("Flash size not aligned to erasesize, reducing to %dKiB\n",
			c->flash_size / 1024);
	}

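	/* JFFS2 needs a handful of spare erase blocks for garbage collection
	   to make progress, so refuse to mount absurdly small devices. */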
	if (c->flash_size < 5*c->sector_size) {
		pr_err("Too few erase blocks (%d)\n",
		       c->flash_size / c->sector_size);
		return -EINVAL;
	}

	c->cleanmarker_size = sizeof(struct jffs2_unknown_node);

	/* NAND (or other bizarre) flash... do setup accordingly */
	ret = jffs2_flash_setup(c);
	if (ret)
		return ret;

	c->inocache_hashsize = calculate_inocache_hashsize(c->flash_size);
	c->inocache_list = kcalloc(c->inocache_hashsize, sizeof(struct jffs2_inode_cache *), GFP_KERNEL);
	if (!c->inocache_list) {
		ret = -ENOMEM;
		goto out_wbuf;
	}

	jffs2_init_xattr_subsystem(c);

	if ((ret = jffs2_do_mount_fs(c)))
		goto out_inohash;

	jffs2_dbg(1, "%s(): Getting root inode\n", __func__);
	root_i = jffs2_iget(sb, 1);
	if (IS_ERR(root_i)) {
		jffs2_dbg(1, "get root inode failed\n");
		ret = PTR_ERR(root_i);
		goto out_root;
	}

	ret = -ENOMEM;

	jffs2_dbg(1, "%s(): d_make_root()\n", __func__);
	sb->s_root = d_make_root(root_i);
	if (!sb->s_root)
		goto out_root;

	sb->s_maxbytes = 0xFFFFFFFF;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = JFFS2_SUPER_MAGIC;
	if (!(sb->s_flags & MS_RDONLY))
		jffs2_start_garbage_collect_thread(c);
	return 0;

out_root:
	jffs2_free_ino_caches(c);
	jffs2_free_raw_node_refs(c);
	if (jffs2_blocks_use_vmalloc(c))
		vfree(c->blocks);
	else
		kfree(c->blocks);
out_inohash:
	jffs2_clear_xattr_subsystem(c);
	kfree(c->inocache_list);
out_wbuf:
	jffs2_flash_cleanup(c);

	return ret;
}

void jffs2_gc_release_inode(struct jffs2_sb_info *c,
			    struct jffs2_inode_info *f)
{
	iput(OFNI_EDONI_2SFFJ(f));
}

struct jffs2_inode_info *jffs2_gc_fetch_inode(struct jffs2_sb_info *c,
					      int inum, int unlinked)
{
	struct inode *inode;
	struct jffs2_inode_cache *ic;

	if (unlinked) {
		/* The inode has zero nlink but its nodes weren't yet marked
		   obsolete. This has to be because we're still waiting for
		   the final (close() and) iput() to happen.

		   There's a possibility that the final iput() could have
		   happened while we were contemplating. In order to ensure
		   that we don't cause a new read_inode() (which would fail)
		   for the inode in question, we use ilookup() in this case
		   instead of iget().

		   The nlink can't _become_ zero at this point because we're
		   holding the alloc_sem, and jffs2_do_unlink() would also
		   need that while decrementing nlink on any inode.
		*/
		inode = ilookup(OFNI_BS_2SFFJ(c), inum);
		if (!inode) {
			jffs2_dbg(1, "ilookup() failed for ino #%u; inode is probably deleted.\n",
				  inum);

			spin_lock(&c->inocache_lock);
			ic = jffs2_get_ino_cache(c, inum);
			if (!ic) {
				jffs2_dbg(1, "Inode cache for ino #%u is gone\n",
					  inum);
				spin_unlock(&c->inocache_lock);
				return NULL;
			}
			if (ic->state != INO_STATE_CHECKEDABSENT) {
				/* Wait for progress. Don't just loop */
				jffs2_dbg(1, "Waiting for ino #%u in state %d\n",
					  ic->ino, ic->state);
				sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			} else {
				spin_unlock(&c->inocache_lock);
			}

			return NULL;
		}
	} else {
		/* Inode has links to it still; they're not going away because
		   jffs2_do_unlink() would need the alloc_sem and we have it.
		   Just iget() it, and if read_inode() is necessary that's OK.
		*/
		inode = jffs2_iget(OFNI_BS_2SFFJ(c), inum);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	if (is_bad_inode(inode)) {
		pr_notice("Eep. read_inode() failed for ino #%u. unlinked %d\n",
			  inum, unlinked);
		/* NB. This will happen again. We need to do something appropriate here. */
		iput(inode);
		return ERR_PTR(-EIO);
	}

	return JFFS2_INODE_INFO(inode);
}

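/* Called by the garbage collector when it needs the data of a page: read it
   through the page cache and return it kmapped; the matching
   jffs2_gc_release_page() below unmaps and releases it again. */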
unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c,
				   struct jffs2_inode_info *f,
				   unsigned long offset,
				   unsigned long *priv)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct page *pg;

	pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT,
			     (void *)jffs2_do_readpage_unlock, inode);
	if (IS_ERR(pg))
		return (void *)pg;

	*priv = (unsigned long)pg;
	return kmap(pg);
}

void jffs2_gc_release_page(struct jffs2_sb_info *c,
			   unsigned char *ptr,
			   unsigned long *priv)
{
	struct page *pg = (void *)*priv;

	kunmap(pg);
	page_cache_release(pg);
}

static int jffs2_flash_setup(struct jffs2_sb_info *c) {
	int ret = 0;

	if (jffs2_cleanmarker_oob(c)) {
		/* NAND flash... do setup accordingly */
		ret = jffs2_nand_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and Dataflash */
	if (jffs2_dataflash(c)) {
		ret = jffs2_dataflash_setup(c);
		if (ret)
			return ret;
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		ret = jffs2_nor_wbuf_flash_setup(c);
		if (ret)
			return ret;
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		ret = jffs2_ubivol_setup(c);
		if (ret)
			return ret;
	}

	return ret;
}

void jffs2_flash_cleanup(struct jffs2_sb_info *c) {

	if (jffs2_cleanmarker_oob(c)) {
		jffs2_nand_flash_cleanup(c);
	}

	/* and DataFlash */
	if (jffs2_dataflash(c)) {
		jffs2_dataflash_cleanup(c);
	}

	/* and Intel "Sibley" flash */
	if (jffs2_nor_wbuf_flash(c)) {
		jffs2_nor_wbuf_flash_cleanup(c);
	}

	/* and a UBI volume */
	if (jffs2_ubivol(c)) {
		jffs2_ubivol_cleanup(c);
	}
}