ext2: Remove i_mutex use from ext2_quota_write()
1 /*
2 * linux/fs/ext2/super.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 * from
10 *
11 * linux/fs/minix/inode.c
12 *
13 * Copyright (C) 1991, 1992 Linus Torvalds
14 *
15 * Big-endian to little-endian byte-swapping/bitmaps by
16 * David S. Miller (davem@caip.rutgers.edu), 1995
17 */
18
19 #include <linux/module.h>
20 #include <linux/string.h>
21 #include <linux/fs.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/blkdev.h>
25 #include <linux/parser.h>
26 #include <linux/random.h>
27 #include <linux/buffer_head.h>
28 #include <linux/exportfs.h>
29 #include <linux/vfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/mount.h>
32 #include <linux/log2.h>
33 #include <linux/quotaops.h>
34 #include <asm/uaccess.h>
35 #include "ext2.h"
36 #include "xattr.h"
37 #include "acl.h"
38 #include "xip.h"
39
40 static void ext2_sync_super(struct super_block *sb,
41 struct ext2_super_block *es, int wait);
42 static int ext2_remount (struct super_block * sb, int * flags, char * data);
43 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
44 static int ext2_sync_fs(struct super_block *sb, int wait);
45
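/*
 * Report a filesystem error.  If the filesystem is not read-only, mark it
 * as having errors both in core and on disk, then honour the errors= mount
 * option: panic, remount read-only, or just continue.
 */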
46 void ext2_error(struct super_block *sb, const char *function,
47 const char *fmt, ...)
48 {
49 struct va_format vaf;
50 va_list args;
51 struct ext2_sb_info *sbi = EXT2_SB(sb);
52 struct ext2_super_block *es = sbi->s_es;
53
54 if (!(sb->s_flags & MS_RDONLY)) {
55 spin_lock(&sbi->s_lock);
56 sbi->s_mount_state |= EXT2_ERROR_FS;
57 es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
58 spin_unlock(&sbi->s_lock);
59 ext2_sync_super(sb, es, 1);
60 }
61
62 va_start(args, fmt);
63
64 vaf.fmt = fmt;
65 vaf.va = &args;
66
67 printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
68 sb->s_id, function, &vaf);
69
70 va_end(args);
71
72 if (test_opt(sb, ERRORS_PANIC))
73 panic("EXT2-fs: panic from previous error\n");
74 if (test_opt(sb, ERRORS_RO)) {
75 ext2_msg(sb, KERN_CRIT,
76 "error: remounting filesystem read-only");
77 sb->s_flags |= MS_RDONLY;
78 }
79 }
80
81 void ext2_msg(struct super_block *sb, const char *prefix,
82 const char *fmt, ...)
83 {
84 struct va_format vaf;
85 va_list args;
86
87 va_start(args, fmt);
88
89 vaf.fmt = fmt;
90 vaf.va = &args;
91
92 printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
93
94 va_end(args);
95 }
96
97 /*
98 * This must be called with sbi->s_lock held.
99 */
100 void ext2_update_dynamic_rev(struct super_block *sb)
101 {
102 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
103
104 if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
105 return;
106
107 ext2_msg(sb, KERN_WARNING,
108 "warning: updating to rev %d because of "
109 "new feature flag, running e2fsck is recommended",
110 EXT2_DYNAMIC_REV);
111
112 es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
113 es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
114 es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
115 /* leave es->s_feature_*compat flags alone */
116 /* es->s_uuid will be set by e2fsck if empty */
117
118 /*
119 * The rest of the superblock fields should be zero, and if not it
120 * means they are likely already in use, so leave them alone. We
121 * can leave it up to e2fsck to clean up any inconsistencies there.
122 */
123 }
124
125 static void ext2_put_super (struct super_block * sb)
126 {
127 int db_count;
128 int i;
129 struct ext2_sb_info *sbi = EXT2_SB(sb);
130
131 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
132
133 ext2_xattr_put_super(sb);
134 if (!(sb->s_flags & MS_RDONLY)) {
135 struct ext2_super_block *es = sbi->s_es;
136
137 spin_lock(&sbi->s_lock);
138 es->s_state = cpu_to_le16(sbi->s_mount_state);
139 spin_unlock(&sbi->s_lock);
140 ext2_sync_super(sb, es, 1);
141 }
142 db_count = sbi->s_gdb_count;
143 for (i = 0; i < db_count; i++)
144 if (sbi->s_group_desc[i])
145 brelse (sbi->s_group_desc[i]);
146 kfree(sbi->s_group_desc);
147 kfree(sbi->s_debts);
148 percpu_counter_destroy(&sbi->s_freeblocks_counter);
149 percpu_counter_destroy(&sbi->s_freeinodes_counter);
150 percpu_counter_destroy(&sbi->s_dirs_counter);
151 brelse (sbi->s_sbh);
152 sb->s_fs_info = NULL;
153 kfree(sbi->s_blockgroup_lock);
154 kfree(sbi);
155 }
156
157 static struct kmem_cache * ext2_inode_cachep;
158
159 static struct inode *ext2_alloc_inode(struct super_block *sb)
160 {
161 struct ext2_inode_info *ei;
162 ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
163 if (!ei)
164 return NULL;
165 ei->i_block_alloc_info = NULL;
166 ei->vfs_inode.i_version = 1;
167 return &ei->vfs_inode;
168 }
169
170 static void ext2_i_callback(struct rcu_head *head)
171 {
172 struct inode *inode = container_of(head, struct inode, i_rcu);
173 kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
174 }
175
176 static void ext2_destroy_inode(struct inode *inode)
177 {
178 call_rcu(&inode->i_rcu, ext2_i_callback);
179 }
180
181 static void init_once(void *foo)
182 {
183 struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;
184
185 rwlock_init(&ei->i_meta_lock);
186 #ifdef CONFIG_EXT2_FS_XATTR
187 init_rwsem(&ei->xattr_sem);
188 #endif
189 mutex_init(&ei->truncate_mutex);
190 inode_init_once(&ei->vfs_inode);
191 }
192
193 static int init_inodecache(void)
194 {
195 ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
196 sizeof(struct ext2_inode_info),
197 0, (SLAB_RECLAIM_ACCOUNT|
198 SLAB_MEM_SPREAD),
199 init_once);
200 if (ext2_inode_cachep == NULL)
201 return -ENOMEM;
202 return 0;
203 }
204
205 static void destroy_inodecache(void)
206 {
207 kmem_cache_destroy(ext2_inode_cachep);
208 }
209
210 static int ext2_show_options(struct seq_file *seq, struct dentry *root)
211 {
212 struct super_block *sb = root->d_sb;
213 struct ext2_sb_info *sbi = EXT2_SB(sb);
214 struct ext2_super_block *es = sbi->s_es;
215 unsigned long def_mount_opts;
216
217 spin_lock(&sbi->s_lock);
218 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
219
220 if (sbi->s_sb_block != 1)
221 seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
222 if (test_opt(sb, MINIX_DF))
223 seq_puts(seq, ",minixdf");
224 if (test_opt(sb, GRPID))
225 seq_puts(seq, ",grpid");
226 if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
227 seq_puts(seq, ",nogrpid");
228 if (sbi->s_resuid != EXT2_DEF_RESUID ||
229 le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
230 seq_printf(seq, ",resuid=%u", sbi->s_resuid);
231 }
232 if (sbi->s_resgid != EXT2_DEF_RESGID ||
233 le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
234 seq_printf(seq, ",resgid=%u", sbi->s_resgid);
235 }
236 if (test_opt(sb, ERRORS_RO)) {
237 int def_errors = le16_to_cpu(es->s_errors);
238
239 if (def_errors == EXT2_ERRORS_PANIC ||
240 def_errors == EXT2_ERRORS_CONTINUE) {
241 seq_puts(seq, ",errors=remount-ro");
242 }
243 }
244 if (test_opt(sb, ERRORS_CONT))
245 seq_puts(seq, ",errors=continue");
246 if (test_opt(sb, ERRORS_PANIC))
247 seq_puts(seq, ",errors=panic");
248 if (test_opt(sb, NO_UID32))
249 seq_puts(seq, ",nouid32");
250 if (test_opt(sb, DEBUG))
251 seq_puts(seq, ",debug");
252 if (test_opt(sb, OLDALLOC))
253 seq_puts(seq, ",oldalloc");
254
255 #ifdef CONFIG_EXT2_FS_XATTR
256 if (test_opt(sb, XATTR_USER))
257 seq_puts(seq, ",user_xattr");
258 if (!test_opt(sb, XATTR_USER) &&
259 (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
260 seq_puts(seq, ",nouser_xattr");
261 }
262 #endif
263
264 #ifdef CONFIG_EXT2_FS_POSIX_ACL
265 if (test_opt(sb, POSIX_ACL))
266 seq_puts(seq, ",acl");
267 if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
268 seq_puts(seq, ",noacl");
269 #endif
270
271 if (test_opt(sb, NOBH))
272 seq_puts(seq, ",nobh");
273
274 #if defined(CONFIG_QUOTA)
275 if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
276 seq_puts(seq, ",usrquota");
277
278 if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
279 seq_puts(seq, ",grpquota");
280 #endif
281
282 #if defined(CONFIG_EXT2_FS_XIP)
283 if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
284 seq_puts(seq, ",xip");
285 #endif
286
287 if (!test_opt(sb, RESERVATION))
288 seq_puts(seq, ",noreservation");
289
290 spin_unlock(&sbi->s_lock);
291 return 0;
292 }
293
294 #ifdef CONFIG_QUOTA
295 static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
296 static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
297 #endif
298
299 static const struct super_operations ext2_sops = {
300 .alloc_inode = ext2_alloc_inode,
301 .destroy_inode = ext2_destroy_inode,
302 .write_inode = ext2_write_inode,
303 .evict_inode = ext2_evict_inode,
304 .put_super = ext2_put_super,
305 .sync_fs = ext2_sync_fs,
306 .statfs = ext2_statfs,
307 .remount_fs = ext2_remount,
308 .show_options = ext2_show_options,
309 #ifdef CONFIG_QUOTA
310 .quota_read = ext2_quota_read,
311 .quota_write = ext2_quota_write,
312 #endif
313 };
314
315 static struct inode *ext2_nfs_get_inode(struct super_block *sb,
316 u64 ino, u32 generation)
317 {
318 struct inode *inode;
319
320 if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
321 return ERR_PTR(-ESTALE);
322 if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
323 return ERR_PTR(-ESTALE);
324
325 /*
326 * ext2_iget isn't quite right if the inode is currently unallocated!
327 * However ext2_iget currently does appropriate checks to handle stale
328 * inodes so everything is OK.
329 */
330 inode = ext2_iget(sb, ino);
331 if (IS_ERR(inode))
332 return ERR_CAST(inode);
333 if (generation && inode->i_generation != generation) {
334 /* we didn't find the right inode.. */
335 iput(inode);
336 return ERR_PTR(-ESTALE);
337 }
338 return inode;
339 }
340
341 static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
342 int fh_len, int fh_type)
343 {
344 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
345 ext2_nfs_get_inode);
346 }
347
348 static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
349 int fh_len, int fh_type)
350 {
351 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
352 ext2_nfs_get_inode);
353 }
354
355 /* Yes, most of these are left as NULL!!
356 * A NULL value implies the default, which works with ext2-like file
357 * systems, but can be improved upon.
358 * Currently only get_parent is required.
359 */
360 static const struct export_operations ext2_export_ops = {
361 .fh_to_dentry = ext2_fh_to_dentry,
362 .fh_to_parent = ext2_fh_to_parent,
363 .get_parent = ext2_get_parent,
364 };
365
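/*
 * Look for a leading "sb=N" mount option so we know which block to read
 * the superblock from.  If present, the option is consumed and *data is
 * advanced past it; otherwise the default location (block 1) is returned.
 */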
366 static unsigned long get_sb_block(void **data)
367 {
368 unsigned long sb_block;
369 char *options = (char *) *data;
370
371 if (!options || strncmp(options, "sb=", 3) != 0)
372 return 1; /* Default location */
373 options += 3;
374 sb_block = simple_strtoul(options, &options, 0);
375 if (*options && *options != ',') {
376 printk("EXT2-fs: Invalid sb specification: %s\n",
377 (char *) *data);
378 return 1;
379 }
380 if (*options == ',')
381 options++;
382 *data = (void *) options;
383 return sb_block;
384 }
385
386 enum {
387 Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
388 Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
389 Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
390 Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
391 Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
392 Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
393 };
394
395 static const match_table_t tokens = {
396 {Opt_bsd_df, "bsddf"},
397 {Opt_minix_df, "minixdf"},
398 {Opt_grpid, "grpid"},
399 {Opt_grpid, "bsdgroups"},
400 {Opt_nogrpid, "nogrpid"},
401 {Opt_nogrpid, "sysvgroups"},
402 {Opt_resgid, "resgid=%u"},
403 {Opt_resuid, "resuid=%u"},
404 {Opt_sb, "sb=%u"},
405 {Opt_err_cont, "errors=continue"},
406 {Opt_err_panic, "errors=panic"},
407 {Opt_err_ro, "errors=remount-ro"},
408 {Opt_nouid32, "nouid32"},
409 {Opt_nocheck, "check=none"},
410 {Opt_nocheck, "nocheck"},
411 {Opt_debug, "debug"},
412 {Opt_oldalloc, "oldalloc"},
413 {Opt_orlov, "orlov"},
414 {Opt_nobh, "nobh"},
415 {Opt_user_xattr, "user_xattr"},
416 {Opt_nouser_xattr, "nouser_xattr"},
417 {Opt_acl, "acl"},
418 {Opt_noacl, "noacl"},
419 {Opt_xip, "xip"},
420 {Opt_grpquota, "grpquota"},
421 {Opt_ignore, "noquota"},
422 {Opt_quota, "quota"},
423 {Opt_usrquota, "usrquota"},
424 {Opt_reservation, "reservation"},
425 {Opt_noreservation, "noreservation"},
426 {Opt_err, NULL}
427 };
428
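/*
 * Parse the mount option string into sbi->s_mount_opt, s_resuid and
 * s_resgid.  Returns 1 on success, 0 if an option is unknown or malformed.
 */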
429 static int parse_options(char *options, struct super_block *sb)
430 {
431 char *p;
432 struct ext2_sb_info *sbi = EXT2_SB(sb);
433 substring_t args[MAX_OPT_ARGS];
434 int option;
435
436 if (!options)
437 return 1;
438
439 while ((p = strsep (&options, ",")) != NULL) {
440 int token;
441 if (!*p)
442 continue;
443
444 token = match_token(p, tokens, args);
445 switch (token) {
446 case Opt_bsd_df:
447 clear_opt (sbi->s_mount_opt, MINIX_DF);
448 break;
449 case Opt_minix_df:
450 set_opt (sbi->s_mount_opt, MINIX_DF);
451 break;
452 case Opt_grpid:
453 set_opt (sbi->s_mount_opt, GRPID);
454 break;
455 case Opt_nogrpid:
456 clear_opt (sbi->s_mount_opt, GRPID);
457 break;
458 case Opt_resuid:
459 if (match_int(&args[0], &option))
460 return 0;
461 sbi->s_resuid = option;
462 break;
463 case Opt_resgid:
464 if (match_int(&args[0], &option))
465 return 0;
466 sbi->s_resgid = option;
467 break;
468 case Opt_sb:
469 /* handled by get_sb_block() instead of here */
470 /* *sb_block = match_int(&args[0]); */
471 break;
472 case Opt_err_panic:
473 clear_opt (sbi->s_mount_opt, ERRORS_CONT);
474 clear_opt (sbi->s_mount_opt, ERRORS_RO);
475 set_opt (sbi->s_mount_opt, ERRORS_PANIC);
476 break;
477 case Opt_err_ro:
478 clear_opt (sbi->s_mount_opt, ERRORS_CONT);
479 clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
480 set_opt (sbi->s_mount_opt, ERRORS_RO);
481 break;
482 case Opt_err_cont:
483 clear_opt (sbi->s_mount_opt, ERRORS_RO);
484 clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
485 set_opt (sbi->s_mount_opt, ERRORS_CONT);
486 break;
487 case Opt_nouid32:
488 set_opt (sbi->s_mount_opt, NO_UID32);
489 break;
490 case Opt_nocheck:
491 clear_opt (sbi->s_mount_opt, CHECK);
492 break;
493 case Opt_debug:
494 set_opt (sbi->s_mount_opt, DEBUG);
495 break;
496 case Opt_oldalloc:
497 set_opt (sbi->s_mount_opt, OLDALLOC);
498 break;
499 case Opt_orlov:
500 clear_opt (sbi->s_mount_opt, OLDALLOC);
501 break;
502 case Opt_nobh:
503 set_opt (sbi->s_mount_opt, NOBH);
504 break;
505 #ifdef CONFIG_EXT2_FS_XATTR
506 case Opt_user_xattr:
507 set_opt (sbi->s_mount_opt, XATTR_USER);
508 break;
509 case Opt_nouser_xattr:
510 clear_opt (sbi->s_mount_opt, XATTR_USER);
511 break;
512 #else
513 case Opt_user_xattr:
514 case Opt_nouser_xattr:
515 ext2_msg(sb, KERN_INFO, "(no)user_xattr options "
516 "not supported");
517 break;
518 #endif
519 #ifdef CONFIG_EXT2_FS_POSIX_ACL
520 case Opt_acl:
521 set_opt(sbi->s_mount_opt, POSIX_ACL);
522 break;
523 case Opt_noacl:
524 clear_opt(sbi->s_mount_opt, POSIX_ACL);
525 break;
526 #else
527 case Opt_acl:
528 case Opt_noacl:
529 ext2_msg(sb, KERN_INFO,
530 "(no)acl options not supported");
531 break;
532 #endif
533 case Opt_xip:
534 #ifdef CONFIG_EXT2_FS_XIP
535 set_opt (sbi->s_mount_opt, XIP);
536 #else
537 ext2_msg(sb, KERN_INFO, "xip option not supported");
538 #endif
539 break;
540
541 #if defined(CONFIG_QUOTA)
542 case Opt_quota:
543 case Opt_usrquota:
544 set_opt(sbi->s_mount_opt, USRQUOTA);
545 break;
546
547 case Opt_grpquota:
548 set_opt(sbi->s_mount_opt, GRPQUOTA);
549 break;
550 #else
551 case Opt_quota:
552 case Opt_usrquota:
553 case Opt_grpquota:
554 ext2_msg(sb, KERN_INFO,
555 "quota operations not supported");
556 break;
557 #endif
558
559 case Opt_reservation:
560 set_opt(sbi->s_mount_opt, RESERVATION);
561 ext2_msg(sb, KERN_INFO, "reservations ON");
562 break;
563 case Opt_noreservation:
564 clear_opt(sbi->s_mount_opt, RESERVATION);
565 ext2_msg(sb, KERN_INFO, "reservations OFF");
566 break;
567 case Opt_ignore:
568 break;
569 default:
570 return 0;
571 }
572 }
573 return 1;
574 }
575
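/*
 * Final setup at mount time: warn about unchecked or errored filesystems
 * and exceeded mount counts, bump the mount count, and return MS_RDONLY
 * if the revision level is higher than this driver supports.
 */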
576 static int ext2_setup_super (struct super_block * sb,
577 struct ext2_super_block * es,
578 int read_only)
579 {
580 int res = 0;
581 struct ext2_sb_info *sbi = EXT2_SB(sb);
582
583 if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
584 ext2_msg(sb, KERN_ERR,
585 "error: revision level too high, "
586 "forcing read-only mode");
587 res = MS_RDONLY;
588 }
589 if (read_only)
590 return res;
591 if (!(sbi->s_mount_state & EXT2_VALID_FS))
592 ext2_msg(sb, KERN_WARNING,
593 "warning: mounting unchecked fs, "
594 "running e2fsck is recommended");
595 else if ((sbi->s_mount_state & EXT2_ERROR_FS))
596 ext2_msg(sb, KERN_WARNING,
597 "warning: mounting fs with errors, "
598 "running e2fsck is recommended");
599 else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
600 le16_to_cpu(es->s_mnt_count) >=
601 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
602 ext2_msg(sb, KERN_WARNING,
603 "warning: maximal mount count reached, "
604 "running e2fsck is recommended");
605 else if (le32_to_cpu(es->s_checkinterval) &&
606 (le32_to_cpu(es->s_lastcheck) +
607 le32_to_cpu(es->s_checkinterval) <= get_seconds()))
608 ext2_msg(sb, KERN_WARNING,
609 "warning: checktime reached, "
610 "running e2fsck is recommended");
611 if (!le16_to_cpu(es->s_max_mnt_count))
612 es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
613 le16_add_cpu(&es->s_mnt_count, 1);
614 if (test_opt (sb, DEBUG))
615 ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
616 "bpg=%lu, ipg=%lu, mo=%04lx]",
617 EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
618 sbi->s_frag_size,
619 sbi->s_groups_count,
620 EXT2_BLOCKS_PER_GROUP(sb),
621 EXT2_INODES_PER_GROUP(sb),
622 sbi->s_mount_opt);
623 return res;
624 }
625
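/*
 * Sanity-check the group descriptors: each group's block bitmap, inode
 * bitmap and inode table must lie within the group itself.  Returns 1 if
 * everything is consistent, 0 otherwise.
 */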
626 static int ext2_check_descriptors(struct super_block *sb)
627 {
628 int i;
629 struct ext2_sb_info *sbi = EXT2_SB(sb);
630
631 ext2_debug ("Checking group descriptors");
632
633 for (i = 0; i < sbi->s_groups_count; i++) {
634 struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
635 ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
636 ext2_fsblk_t last_block;
637
638 if (i == sbi->s_groups_count - 1)
639 last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
640 else
641 last_block = first_block +
642 (EXT2_BLOCKS_PER_GROUP(sb) - 1);
643
644 if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
645 le32_to_cpu(gdp->bg_block_bitmap) > last_block)
646 {
647 ext2_error (sb, "ext2_check_descriptors",
648 "Block bitmap for group %d"
649 " not in group (block %lu)!",
650 i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
651 return 0;
652 }
653 if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
654 le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
655 {
656 ext2_error (sb, "ext2_check_descriptors",
657 "Inode bitmap for group %d"
658 " not in group (block %lu)!",
659 i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
660 return 0;
661 }
662 if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
663 le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
664 last_block)
665 {
666 ext2_error (sb, "ext2_check_descriptors",
667 "Inode table for group %d"
668 " not in group (block %lu)!",
669 i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
670 return 0;
671 }
672 }
673 return 1;
674 }
675
676 /*
677 * Maximal file size. There is a direct, and {,double-,triple-}indirect
678 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
679 * We need to be 1 filesystem block less than the 2^32 sector limit.
680 */
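/*
 * Rough worked example (figures are approximate): with 4 KiB blocks
 * (bits == 12) the i_blocks limit of 2^32 - 1 512-byte sectors allows
 * about 2^29 filesystem blocks; after subtracting roughly a million
 * indirect blocks the file size caps out just under 2 TiB, whereas the
 * indirect-block geometry alone would permit around 4 TiB.
 */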
681 static loff_t ext2_max_size(int bits)
682 {
683 loff_t res = EXT2_NDIR_BLOCKS;
684 int meta_blocks;
685 loff_t upper_limit;
686
687 /* This is calculated to be the largest file size for a
688 * dense file such that the total number of
689 * sectors in the file, including data and all indirect blocks,
690 * does not exceed 2^32 - 1, with the __u32 i_blocks field
691 * representing the total number of
692 * 512-byte blocks of the file.
693 */
694 upper_limit = (1LL << 32) - 1;
695
696 /* total blocks in file system block size */
697 upper_limit >>= (bits - 9);
698
699
700 /* indirect blocks */
701 meta_blocks = 1;
702 /* double indirect blocks */
703 meta_blocks += 1 + (1LL << (bits-2));
704 /* triple indirect blocks */
705 meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
706
707 upper_limit -= meta_blocks;
708 upper_limit <<= bits;
709
710 res += 1LL << (bits-2);
711 res += 1LL << (2*(bits-2));
712 res += 1LL << (3*(bits-2));
713 res <<= bits;
714 if (res > upper_limit)
715 res = upper_limit;
716
717 if (res > MAX_LFS_FILESIZE)
718 res = MAX_LFS_FILESIZE;
719
720 return res;
721 }
722
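/*
 * Return the block that holds group descriptor block 'nr'.  Without the
 * meta_bg feature the descriptor blocks simply follow the superblock;
 * with meta_bg, descriptor blocks beyond s_first_meta_bg live at the
 * start of their own block group, after a backup superblock if that
 * group has one.
 */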
723 static unsigned long descriptor_loc(struct super_block *sb,
724 unsigned long logic_sb_block,
725 int nr)
726 {
727 struct ext2_sb_info *sbi = EXT2_SB(sb);
728 unsigned long bg, first_meta_bg;
729 int has_super = 0;
730
731 first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
732
733 if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
734 nr < first_meta_bg)
735 return (logic_sb_block + nr + 1);
736 bg = sbi->s_desc_per_block * nr;
737 if (ext2_bg_has_super(sb, bg))
738 has_super = 1;
739
740 return ext2_group_first_block_no(sb, bg) + has_super;
741 }
742
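/*
 * Read and validate the on-disk superblock, build the in-core
 * ext2_sb_info (group descriptors, reservation tree, free block/inode
 * counters) and load the root inode.
 */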
743 static int ext2_fill_super(struct super_block *sb, void *data, int silent)
744 {
745 struct buffer_head * bh;
746 struct ext2_sb_info * sbi;
747 struct ext2_super_block * es;
748 struct inode *root;
749 unsigned long block;
750 unsigned long sb_block = get_sb_block(&data);
751 unsigned long logic_sb_block;
752 unsigned long offset = 0;
753 unsigned long def_mount_opts;
754 long ret = -EINVAL;
755 int blocksize = BLOCK_SIZE;
756 int db_count;
757 int i, j;
758 __le32 features;
759 int err;
760
761 err = -ENOMEM;
762 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
763 if (!sbi)
764 goto failed_unlock;
765
766 sbi->s_blockgroup_lock =
767 kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
768 if (!sbi->s_blockgroup_lock) {
769 kfree(sbi);
770 goto failed_unlock;
771 }
772 sb->s_fs_info = sbi;
773 sbi->s_sb_block = sb_block;
774
775 spin_lock_init(&sbi->s_lock);
776
777 /*
778 * See what the current blocksize for the device is and
779 * use that as the blocksize, unless it is smaller than the
780 * default, in which case use the default.
781 * This is important for devices that have a hardware
782 * sector size larger than the default.
783 */
784 blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
785 if (!blocksize) {
786 ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
787 goto failed_sbi;
788 }
789
790 /*
791 * If the superblock doesn't start on a hardware sector boundary,
792 * calculate the offset.
793 */
794 if (blocksize != BLOCK_SIZE) {
795 logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
796 offset = (sb_block*BLOCK_SIZE) % blocksize;
797 } else {
798 logic_sb_block = sb_block;
799 }
800
801 if (!(bh = sb_bread(sb, logic_sb_block))) {
802 ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
803 goto failed_sbi;
804 }
805 /*
806 * Note: s_es must be initialized as soon as possible because
807 * some ext2 macro-instructions depend on its value
808 */
809 es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
810 sbi->s_es = es;
811 sb->s_magic = le16_to_cpu(es->s_magic);
812
813 if (sb->s_magic != EXT2_SUPER_MAGIC)
814 goto cantfind_ext2;
815
816 /* Set defaults before we parse the mount options */
817 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
818 if (def_mount_opts & EXT2_DEFM_DEBUG)
819 set_opt(sbi->s_mount_opt, DEBUG);
820 if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
821 set_opt(sbi->s_mount_opt, GRPID);
822 if (def_mount_opts & EXT2_DEFM_UID16)
823 set_opt(sbi->s_mount_opt, NO_UID32);
824 #ifdef CONFIG_EXT2_FS_XATTR
825 if (def_mount_opts & EXT2_DEFM_XATTR_USER)
826 set_opt(sbi->s_mount_opt, XATTR_USER);
827 #endif
828 #ifdef CONFIG_EXT2_FS_POSIX_ACL
829 if (def_mount_opts & EXT2_DEFM_ACL)
830 set_opt(sbi->s_mount_opt, POSIX_ACL);
831 #endif
832
833 if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
834 set_opt(sbi->s_mount_opt, ERRORS_PANIC);
835 else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
836 set_opt(sbi->s_mount_opt, ERRORS_CONT);
837 else
838 set_opt(sbi->s_mount_opt, ERRORS_RO);
839
840 sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
841 sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
842
843 set_opt(sbi->s_mount_opt, RESERVATION);
844
845 if (!parse_options((char *) data, sb))
846 goto failed_mount;
847
848 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
849 ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
850 MS_POSIXACL : 0);
851
852 ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
853 EXT2_MOUNT_XIP if not */
854
855 if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
856 (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
857 EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
858 EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
859 ext2_msg(sb, KERN_WARNING,
860 "warning: feature flags set on rev 0 fs, "
861 "running e2fsck is recommended");
862 /*
863 * Check feature flags regardless of the revision level, since we
864 * previously didn't change the revision level when setting the flags,
865 * so there is a chance incompat flags are set on a rev 0 filesystem.
866 */
867 features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
868 if (features) {
869 ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
870 "unsupported optional features (%x)",
871 le32_to_cpu(features));
872 goto failed_mount;
873 }
874 if (!(sb->s_flags & MS_RDONLY) &&
875 (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
876 ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
877 "unsupported optional features (%x)",
878 le32_to_cpu(features));
879 goto failed_mount;
880 }
881
882 blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
883
884 if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
885 if (!silent)
886 ext2_msg(sb, KERN_ERR,
887 "error: unsupported blocksize for xip");
888 goto failed_mount;
889 }
890
891 /* If the blocksize doesn't match, re-read the thing.. */
892 if (sb->s_blocksize != blocksize) {
893 brelse(bh);
894
895 if (!sb_set_blocksize(sb, blocksize)) {
896 ext2_msg(sb, KERN_ERR,
897 "error: bad blocksize %d", blocksize);
898 goto failed_sbi;
899 }
900
901 logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
902 offset = (sb_block*BLOCK_SIZE) % blocksize;
903 bh = sb_bread(sb, logic_sb_block);
904 if(!bh) {
905 ext2_msg(sb, KERN_ERR, "error: couldn't read "
906 "superblock on 2nd try");
907 goto failed_sbi;
908 }
909 es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
910 sbi->s_es = es;
911 if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
912 ext2_msg(sb, KERN_ERR, "error: magic mismatch");
913 goto failed_mount;
914 }
915 }
916
917 sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
918 sb->s_max_links = EXT2_LINK_MAX;
919
920 if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
921 sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
922 sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
923 } else {
924 sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
925 sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
926 if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
927 !is_power_of_2(sbi->s_inode_size) ||
928 (sbi->s_inode_size > blocksize)) {
929 ext2_msg(sb, KERN_ERR,
930 "error: unsupported inode size: %d",
931 sbi->s_inode_size);
932 goto failed_mount;
933 }
934 }
935
936 sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
937 le32_to_cpu(es->s_log_frag_size);
938 if (sbi->s_frag_size == 0)
939 goto cantfind_ext2;
940 sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
941
942 sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
943 sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
944 sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
945
946 if (EXT2_INODE_SIZE(sb) == 0)
947 goto cantfind_ext2;
948 sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
949 if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
950 goto cantfind_ext2;
951 sbi->s_itb_per_group = sbi->s_inodes_per_group /
952 sbi->s_inodes_per_block;
953 sbi->s_desc_per_block = sb->s_blocksize /
954 sizeof (struct ext2_group_desc);
955 sbi->s_sbh = bh;
956 sbi->s_mount_state = le16_to_cpu(es->s_state);
957 sbi->s_addr_per_block_bits =
958 ilog2 (EXT2_ADDR_PER_BLOCK(sb));
959 sbi->s_desc_per_block_bits =
960 ilog2 (EXT2_DESC_PER_BLOCK(sb));
961
962 if (sb->s_magic != EXT2_SUPER_MAGIC)
963 goto cantfind_ext2;
964
965 if (sb->s_blocksize != bh->b_size) {
966 if (!silent)
967 ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
968 goto failed_mount;
969 }
970
971 if (sb->s_blocksize != sbi->s_frag_size) {
972 ext2_msg(sb, KERN_ERR,
973 "error: fragsize %lu != blocksize %lu"
974 "(not supported yet)",
975 sbi->s_frag_size, sb->s_blocksize);
976 goto failed_mount;
977 }
978
979 if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
980 ext2_msg(sb, KERN_ERR,
981 "error: #blocks per group too big: %lu",
982 sbi->s_blocks_per_group);
983 goto failed_mount;
984 }
985 if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
986 ext2_msg(sb, KERN_ERR,
987 "error: #fragments per group too big: %lu",
988 sbi->s_frags_per_group);
989 goto failed_mount;
990 }
991 if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
992 ext2_msg(sb, KERN_ERR,
993 "error: #inodes per group too big: %lu",
994 sbi->s_inodes_per_group);
995 goto failed_mount;
996 }
997
998 if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
999 goto cantfind_ext2;
1000 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
1001 le32_to_cpu(es->s_first_data_block) - 1)
1002 / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
1003 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
1004 EXT2_DESC_PER_BLOCK(sb);
1005 sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
1006 if (sbi->s_group_desc == NULL) {
1007 ext2_msg(sb, KERN_ERR, "error: not enough memory");
1008 goto failed_mount;
1009 }
1010 bgl_lock_init(sbi->s_blockgroup_lock);
1011 sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
1012 if (!sbi->s_debts) {
1013 ext2_msg(sb, KERN_ERR, "error: not enough memory");
1014 goto failed_mount_group_desc;
1015 }
1016 for (i = 0; i < db_count; i++) {
1017 block = descriptor_loc(sb, logic_sb_block, i);
1018 sbi->s_group_desc[i] = sb_bread(sb, block);
1019 if (!sbi->s_group_desc[i]) {
1020 for (j = 0; j < i; j++)
1021 brelse (sbi->s_group_desc[j]);
1022 ext2_msg(sb, KERN_ERR,
1023 "error: unable to read group descriptors");
1024 goto failed_mount_group_desc;
1025 }
1026 }
1027 if (!ext2_check_descriptors (sb)) {
1028 ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
1029 goto failed_mount2;
1030 }
1031 sbi->s_gdb_count = db_count;
1032 get_random_bytes(&sbi->s_next_generation, sizeof(u32));
1033 spin_lock_init(&sbi->s_next_gen_lock);
1034
1035 /* per filesystem reservation list head & lock */
1036 spin_lock_init(&sbi->s_rsv_window_lock);
1037 sbi->s_rsv_window_root = RB_ROOT;
1038 /*
1039 * Add a single, static dummy reservation to the start of the
1040 * reservation window list --- it gives us a placeholder for
1041 * append-at-start-of-list which makes the allocation logic
1042 * _much_ simpler.
1043 */
1044 sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1045 sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
1046 sbi->s_rsv_window_head.rsv_alloc_hit = 0;
1047 sbi->s_rsv_window_head.rsv_goal_size = 0;
1048 ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
1049
1050 err = percpu_counter_init(&sbi->s_freeblocks_counter,
1051 ext2_count_free_blocks(sb));
1052 if (!err) {
1053 err = percpu_counter_init(&sbi->s_freeinodes_counter,
1054 ext2_count_free_inodes(sb));
1055 }
1056 if (!err) {
1057 err = percpu_counter_init(&sbi->s_dirs_counter,
1058 ext2_count_dirs(sb));
1059 }
1060 if (err) {
1061 ext2_msg(sb, KERN_ERR, "error: insufficient memory");
1062 goto failed_mount3;
1063 }
1064 /*
1065 * set up enough so that it can read an inode
1066 */
1067 sb->s_op = &ext2_sops;
1068 sb->s_export_op = &ext2_export_ops;
1069 sb->s_xattr = ext2_xattr_handlers;
1070
1071 #ifdef CONFIG_QUOTA
1072 sb->dq_op = &dquot_operations;
1073 sb->s_qcop = &dquot_quotactl_ops;
1074 #endif
1075
1076 root = ext2_iget(sb, EXT2_ROOT_INO);
1077 if (IS_ERR(root)) {
1078 ret = PTR_ERR(root);
1079 goto failed_mount3;
1080 }
1081 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
1082 iput(root);
1083 ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
1084 goto failed_mount3;
1085 }
1086
1087 sb->s_root = d_make_root(root);
1088 if (!sb->s_root) {
1089 ext2_msg(sb, KERN_ERR, "error: get root inode failed");
1090 ret = -ENOMEM;
1091 goto failed_mount3;
1092 }
1093 if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
1094 ext2_msg(sb, KERN_WARNING,
1095 "warning: mounting ext3 filesystem as ext2");
1096 if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
1097 sb->s_flags |= MS_RDONLY;
1098 ext2_write_super(sb);
1099 return 0;
1100
1101 cantfind_ext2:
1102 if (!silent)
1103 ext2_msg(sb, KERN_ERR,
1104 "error: can't find an ext2 filesystem on dev %s.",
1105 sb->s_id);
1106 goto failed_mount;
1107 failed_mount3:
1108 percpu_counter_destroy(&sbi->s_freeblocks_counter);
1109 percpu_counter_destroy(&sbi->s_freeinodes_counter);
1110 percpu_counter_destroy(&sbi->s_dirs_counter);
1111 failed_mount2:
1112 for (i = 0; i < db_count; i++)
1113 brelse(sbi->s_group_desc[i]);
1114 failed_mount_group_desc:
1115 kfree(sbi->s_group_desc);
1116 kfree(sbi->s_debts);
1117 failed_mount:
1118 brelse(bh);
1119 failed_sbi:
1120 sb->s_fs_info = NULL;
1121 kfree(sbi->s_blockgroup_lock);
1122 kfree(sbi);
1123 failed_unlock:
1124 return ret;
1125 }
1126
1127 static void ext2_clear_super_error(struct super_block *sb)
1128 {
1129 struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1130
1131 if (buffer_write_io_error(sbh)) {
1132 /*
1133 * Oh, dear. A previous attempt to write the
1134 * superblock failed. This could happen because the
1135 * USB device was yanked out. Or it could happen to
1136 * be a transient write error and maybe the block will
1137 * be remapped. Nothing we can do but to retry the
1138 * write and hope for the best.
1139 */
1140 ext2_msg(sb, KERN_ERR,
1141 "previous I/O error to superblock detected\n");
1142 clear_buffer_write_io_error(sbh);
1143 set_buffer_uptodate(sbh);
1144 }
1145 }
1146
1147 static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
1148 int wait)
1149 {
1150 ext2_clear_super_error(sb);
1151 spin_lock(&EXT2_SB(sb)->s_lock);
1152 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1153 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1154 es->s_wtime = cpu_to_le32(get_seconds());
1155 /* unlock before we do IO */
1156 spin_unlock(&EXT2_SB(sb)->s_lock);
1157 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1158 if (wait)
1159 sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
1160 }
1161
1162 /*
1163 * In the second extended file system, it is not necessary to
1164 * write the super block since we use a mapping of the
1165 * disk super block in a buffer.
1166 *
1167 * However, this function is still used to set the fs valid
1168 * flags to 0. We need to set this flag to 0 since the fs
1169 * may have been checked while mounted and e2fsck may have
1170 * set s_state to EXT2_VALID_FS after some corrections.
1171 */
1172 static int ext2_sync_fs(struct super_block *sb, int wait)
1173 {
1174 struct ext2_sb_info *sbi = EXT2_SB(sb);
1175 struct ext2_super_block *es = EXT2_SB(sb)->s_es;
1176
1177 spin_lock(&sbi->s_lock);
1178 if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
1179 ext2_debug("setting valid to 0\n");
1180 es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
1181 }
1182 spin_unlock(&sbi->s_lock);
1183 ext2_sync_super(sb, es, wait);
1184 return 0;
1185 }
1186
1187
1188 void ext2_write_super(struct super_block *sb)
1189 {
1190 if (!(sb->s_flags & MS_RDONLY))
1191 ext2_sync_fs(sb, 1);
1192 }
1193
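/*
 * Remount: reparse the mount options and handle transitions between
 * read-only and read-write, syncing the superblock and suspending or
 * resuming quotas as needed.
 */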
1194 static int ext2_remount (struct super_block * sb, int * flags, char * data)
1195 {
1196 struct ext2_sb_info * sbi = EXT2_SB(sb);
1197 struct ext2_super_block * es;
1198 unsigned long old_mount_opt = sbi->s_mount_opt;
1199 struct ext2_mount_options old_opts;
1200 unsigned long old_sb_flags;
1201 int err;
1202
1203 spin_lock(&sbi->s_lock);
1204
1205 /* Store the old options */
1206 old_sb_flags = sb->s_flags;
1207 old_opts.s_mount_opt = sbi->s_mount_opt;
1208 old_opts.s_resuid = sbi->s_resuid;
1209 old_opts.s_resgid = sbi->s_resgid;
1210
1211 /*
1212 * Allow the "check" option to be passed as a remount option.
1213 */
1214 if (!parse_options(data, sb)) {
1215 err = -EINVAL;
1216 goto restore_opts;
1217 }
1218
1219 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
1220 ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
1221
1222 ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
1223 EXT2_MOUNT_XIP if not */
1224
1225 if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
1226 ext2_msg(sb, KERN_WARNING,
1227 "warning: unsupported blocksize for xip");
1228 err = -EINVAL;
1229 goto restore_opts;
1230 }
1231
1232 es = sbi->s_es;
1233 if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
1234 ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
1235 "xip flag with busy inodes while remounting");
1236 sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
1237 sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
1238 }
1239 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
1240 spin_unlock(&sbi->s_lock);
1241 return 0;
1242 }
1243 if (*flags & MS_RDONLY) {
1244 if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
1245 !(sbi->s_mount_state & EXT2_VALID_FS)) {
1246 spin_unlock(&sbi->s_lock);
1247 return 0;
1248 }
1249
1250 /*
1251 * OK, we are remounting a valid rw partition rdonly, so set
1252 * the rdonly flag and then mark the partition as valid again.
1253 */
1254 es->s_state = cpu_to_le16(sbi->s_mount_state);
1255 es->s_mtime = cpu_to_le32(get_seconds());
1256 spin_unlock(&sbi->s_lock);
1257
1258 err = dquot_suspend(sb, -1);
1259 if (err < 0) {
1260 spin_lock(&sbi->s_lock);
1261 goto restore_opts;
1262 }
1263
1264 ext2_sync_super(sb, es, 1);
1265 } else {
1266 __le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
1267 ~EXT2_FEATURE_RO_COMPAT_SUPP);
1268 if (ret) {
1269 ext2_msg(sb, KERN_WARNING,
1270 "warning: couldn't remount RDWR because of "
1271 "unsupported optional features (%x).",
1272 le32_to_cpu(ret));
1273 err = -EROFS;
1274 goto restore_opts;
1275 }
1276 /*
1277 * Mounting a RDONLY partition read-write, so reread and
1278 * store the current valid flag. (It may have been changed
1279 * by e2fsck since we originally mounted the partition.)
1280 */
1281 sbi->s_mount_state = le16_to_cpu(es->s_state);
1282 if (!ext2_setup_super (sb, es, 0))
1283 sb->s_flags &= ~MS_RDONLY;
1284 spin_unlock(&sbi->s_lock);
1285
1286 ext2_write_super(sb);
1287
1288 dquot_resume(sb, -1);
1289 }
1290
1291 return 0;
1292 restore_opts:
1293 sbi->s_mount_opt = old_opts.s_mount_opt;
1294 sbi->s_resuid = old_opts.s_resuid;
1295 sbi->s_resgid = old_opts.s_resgid;
1296 sb->s_flags = old_sb_flags;
1297 spin_unlock(&sbi->s_lock);
1298 return err;
1299 }
1300
1301 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
1302 {
1303 struct super_block *sb = dentry->d_sb;
1304 struct ext2_sb_info *sbi = EXT2_SB(sb);
1305 struct ext2_super_block *es = sbi->s_es;
1306 u64 fsid;
1307
1308 spin_lock(&sbi->s_lock);
1309
1310 if (test_opt (sb, MINIX_DF))
1311 sbi->s_overhead_last = 0;
1312 else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
1313 unsigned long i, overhead = 0;
1314 smp_rmb();
1315
1316 /*
1317 * Compute the overhead (FS structures). This is constant
1318 * for a given filesystem unless the number of block groups
1319 * changes so we cache the previous value until it does.
1320 */
1321
1322 /*
1323 * All of the blocks before first_data_block are
1324 * overhead
1325 */
1326 overhead = le32_to_cpu(es->s_first_data_block);
1327
1328 /*
1329 * Add the overhead attributed to the superblock and
1330 * block group descriptors. If the sparse superblocks
1331 * feature is turned on, then not all groups have this.
1332 */
1333 for (i = 0; i < sbi->s_groups_count; i++)
1334 overhead += ext2_bg_has_super(sb, i) +
1335 ext2_bg_num_gdb(sb, i);
1336
1337 /*
1338 * Every block group has an inode bitmap, a block
1339 * bitmap, and an inode table.
1340 */
1341 overhead += (sbi->s_groups_count *
1342 (2 + sbi->s_itb_per_group));
1343 sbi->s_overhead_last = overhead;
1344 smp_wmb();
1345 sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
1346 }
1347
1348 buf->f_type = EXT2_SUPER_MAGIC;
1349 buf->f_bsize = sb->s_blocksize;
1350 buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
1351 buf->f_bfree = ext2_count_free_blocks(sb);
1352 es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
1353 buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
1354 if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
1355 buf->f_bavail = 0;
1356 buf->f_files = le32_to_cpu(es->s_inodes_count);
1357 buf->f_ffree = ext2_count_free_inodes(sb);
1358 es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
1359 buf->f_namelen = EXT2_NAME_LEN;
1360 fsid = le64_to_cpup((void *)es->s_uuid) ^
1361 le64_to_cpup((void *)es->s_uuid + sizeof(u64));
1362 buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
1363 buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
1364 spin_unlock(&sbi->s_lock);
1365 return 0;
1366 }
1367
1368 static struct dentry *ext2_mount(struct file_system_type *fs_type,
1369 int flags, const char *dev_name, void *data)
1370 {
1371 return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
1372 }
1373
1374 #ifdef CONFIG_QUOTA
1375
1376 /* Read data from quotafile - avoid pagecache and such because we cannot afford
1377 * acquiring the locks... As quota files are never truncated and quota code
1378 * itself serializes the operations (and no one else should touch the files)
1379 * we don't have to be afraid of races */
1380 static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
1381 size_t len, loff_t off)
1382 {
1383 struct inode *inode = sb_dqopt(sb)->files[type];
1384 sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1385 int err = 0;
1386 int offset = off & (sb->s_blocksize - 1);
1387 int tocopy;
1388 size_t toread;
1389 struct buffer_head tmp_bh;
1390 struct buffer_head *bh;
1391 loff_t i_size = i_size_read(inode);
1392
1393 if (off > i_size)
1394 return 0;
1395 if (off+len > i_size)
1396 len = i_size-off;
1397 toread = len;
1398 while (toread > 0) {
1399 tocopy = sb->s_blocksize - offset < toread ?
1400 sb->s_blocksize - offset : toread;
1401
1402 tmp_bh.b_state = 0;
1403 tmp_bh.b_size = sb->s_blocksize;
1404 err = ext2_get_block(inode, blk, &tmp_bh, 0);
1405 if (err < 0)
1406 return err;
1407 if (!buffer_mapped(&tmp_bh)) /* A hole? */
1408 memset(data, 0, tocopy);
1409 else {
1410 bh = sb_bread(sb, tmp_bh.b_blocknr);
1411 if (!bh)
1412 return -EIO;
1413 memcpy(data, bh->b_data+offset, tocopy);
1414 brelse(bh);
1415 }
1416 offset = 0;
1417 toread -= tocopy;
1418 data += tocopy;
1419 blk++;
1420 }
1421 return len;
1422 }
1423
1424 /* Write to quotafile */
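/*
 * The quota code serializes writes to the quota file itself (see the
 * comment above ext2_quota_read()), so no extra locking of the inode is
 * needed here.
 */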
1425 static ssize_t ext2_quota_write(struct super_block *sb, int type,
1426 const char *data, size_t len, loff_t off)
1427 {
1428 struct inode *inode = sb_dqopt(sb)->files[type];
1429 sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
1430 int err = 0;
1431 int offset = off & (sb->s_blocksize - 1);
1432 int tocopy;
1433 size_t towrite = len;
1434 struct buffer_head tmp_bh;
1435 struct buffer_head *bh;
1436
1437 while (towrite > 0) {
1438 tocopy = sb->s_blocksize - offset < towrite ?
1439 sb->s_blocksize - offset : towrite;
1440
1441 tmp_bh.b_state = 0;
1442 err = ext2_get_block(inode, blk, &tmp_bh, 1);
1443 if (err < 0)
1444 goto out;
1445 if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
1446 bh = sb_bread(sb, tmp_bh.b_blocknr);
1447 else
1448 bh = sb_getblk(sb, tmp_bh.b_blocknr);
1449 if (!bh) {
1450 err = -EIO;
1451 goto out;
1452 }
1453 lock_buffer(bh);
1454 memcpy(bh->b_data+offset, data, tocopy);
1455 flush_dcache_page(bh->b_page);
1456 set_buffer_uptodate(bh);
1457 mark_buffer_dirty(bh);
1458 unlock_buffer(bh);
1459 brelse(bh);
1460 offset = 0;
1461 towrite -= tocopy;
1462 data += tocopy;
1463 blk++;
1464 }
1465 out:
1466 if (len == towrite)
1467 return err;
1468 if (inode->i_size < off+len-towrite)
1469 i_size_write(inode, off+len-towrite);
1470 inode->i_version++;
1471 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1472 mark_inode_dirty(inode);
1473 return len - towrite;
1474 }
1475
1476 #endif
1477
1478 static struct file_system_type ext2_fs_type = {
1479 .owner = THIS_MODULE,
1480 .name = "ext2",
1481 .mount = ext2_mount,
1482 .kill_sb = kill_block_super,
1483 .fs_flags = FS_REQUIRES_DEV,
1484 };
1485
1486 static int __init init_ext2_fs(void)
1487 {
1488 int err = init_ext2_xattr();
1489 if (err)
1490 return err;
1491 err = init_inodecache();
1492 if (err)
1493 goto out1;
1494 err = register_filesystem(&ext2_fs_type);
1495 if (err)
1496 goto out;
1497 return 0;
1498 out:
1499 destroy_inodecache();
1500 out1:
1501 exit_ext2_xattr();
1502 return err;
1503 }
1504
1505 static void __exit exit_ext2_fs(void)
1506 {
1507 unregister_filesystem(&ext2_fs_type);
1508 destroy_inodecache();
1509 exit_ext2_xattr();
1510 }
1511
1512 MODULE_AUTHOR("Remy Card and others");
1513 MODULE_DESCRIPTION("Second Extended Filesystem");
1514 MODULE_LICENSE("GPL");
1515 module_init(init_ext2_fs)
1516 module_exit(exit_ext2_fs)