/*
 * Compressed rom filesystem for Linux.
 *
 * Copyright (C) 1999 Linus Torvalds.
 *
 * This file is released under the GPL.
 */

/*
 * These are the VFS interfaces to the compressed rom filesystem.
 * The actual compression is based on zlib, see the other files.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/cramfs_fs.h>
#include <linux/slab.h>
#include <linux/cramfs_fs_sb.h>
#include <linux/vfs.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>

static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct address_space_operations cramfs_aops;

static DEFINE_MUTEX(read_mutex);


/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x)	((x)->i_ino)

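/*
 * Derive the inode number from the on-disk inode.  Entries that carry
 * data use their byte offset into the image (always a multiple of 4);
 * entries without data use the directory entry offset + 1, so the two
 * kinds can never collide and every inode number stays non-zero.
 */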
static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
{
	if (!cino->offset)
		return offset + 1;
	if (!cino->size)
		return offset + 1;

	/*
	 * The file mode test fixes buggy mkcramfs implementations where
	 * cramfs_inode->offset is set to a non-zero value for entries
	 * which did not contain data, like device nodes and FIFOs.
	 */
	switch (cino->mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		return cino->offset << 2;
	default:
		break;
	}
	return offset + 1;
}

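/*
 * Get (or create) the in-core inode that corresponds to an on-disk
 * cramfs inode, and hook up the operations for its file type.
 */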
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	static struct timespec zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		init_special_inode(inode, cramfs_inode->mode,
				   old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	inode->i_uid = cramfs_inode->uid;
	inode->i_gid = cramfs_inode->gid;

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/* inode->i_nlink is left at 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents.  1 yields the right result in GNU find, even
	   without the -noleaf option. */

	unlock_new_inode(inode);

	return inode;
}

/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT	(2)
#define BLKS_PER_BUF		(1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE		(BLKS_PER_BUF*PAGE_CACHE_SIZE)

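/*
 * The read buffers below are shared by all mounted cramfs images and
 * are only ever touched with read_mutex held.
 */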
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];
static struct super_block *buffer_dev[READ_BUFFERS];
static int next_buffer;

/*
 * Returns a pointer to a buffer containing at least LEN bytes of the
 * filesystem image, starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

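	/* Not cached: read BLKS_PER_BUF device pages and refill the next buffer. */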
	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
						       NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}

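/* Free the private super block info allocated at mount time. */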
static void cramfs_put_super(struct super_block *sb)
{
	kfree(sb->s_fs_info);
	sb->s_fs_info = NULL;
}

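/* cramfs is read-only: force MS_RDONLY on every remount. */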
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}

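/*
 * Read and sanity-check the superblock (at offset 0, falling back to
 * offset 512 for padded images), record the filesystem geometry in
 * cramfs_sb_info and look up the root inode.
 */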
static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int i;
	struct cramfs_super super;
	unsigned long root_offset;
	struct cramfs_sb_info *sbi;
	struct inode *root;

	sb->s_flags |= MS_RDONLY;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	mutex_lock(&read_mutex);
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	/* Read the first block and get the superblock from it */
	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super.magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super.magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			goto out;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
		mutex_unlock(&read_mutex);
		if (super.magic != CRAMFS_MAGIC) {
			if (super.magic == CRAMFS_MAGIC_WEND && !silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			else if (!silent)
				printk(KERN_ERR "cramfs: wrong magic\n");
			goto out;
		}
	}

	/* get feature flags first */
	if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
		printk(KERN_ERR "cramfs: unsupported filesystem features\n");
		goto out;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super.root.mode)) {
		printk(KERN_ERR "cramfs: root is not a directory\n");
		goto out;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);

	root_offset = super.root.offset << 2;
	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size = super.size;
		sbi->blocks = super.fsid.blocks;
		sbi->files = super.fsid.files;
	} else {
		sbi->size = 1 << 28;
		sbi->blocks = 0;
		sbi->files = 0;
	}
	sbi->magic = super.magic;
	sbi->flags = super.flags;
	if (root_offset == 0)
		printk(KERN_INFO "cramfs: empty filesystem\n");
	else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
		goto out;
	}

	/* Set it all up.. */
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, &super.root, 0);
	if (IS_ERR(root))
		goto out;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		goto out;
	return 0;
out:
	kfree(sbi);
	sb->s_fs_info = NULL;
	return -EINVAL;
}

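/* Report filesystem statistics; cramfs is read-only, so the free counts are all zero. */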
static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);

	buf->f_type = CRAMFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_blocks = CRAMFS_SB(sb)->blocks;
	buf->f_bfree = 0;
	buf->f_bavail = 0;
	buf->f_files = CRAMFS_SB(sb)->files;
	buf->f_ffree = 0;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
	buf->f_namelen = CRAMFS_MAXPATHLEN;
	return 0;
}

/*
 * Read a cramfs directory entry.
 */
static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;
	int copied;

	/* Offset within the directory data. */
	offset = filp->f_pos;
	if (offset >= inode->i_size)
		return 0;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen, error;

		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);
		nextoffset = offset + sizeof(*de) + namelen;
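		/* Strip the trailing zero padding; an all-zero name means a corrupt image. */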
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
		if (error)
			break;

		offset = nextoffset;
		filp->f_pos = offset;
		copied++;
	}
	kfree(buf);
	return 0;
}

/*
 * Lookup and fill in the inode data..
 */
static struct dentry *cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
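		/*
		 * With CRAMFS_FLAG_SORTED_DIRS the entries are sorted by
		 * name, so a negative comparison below means the name can
		 * no longer appear and the scan can stop early.
		 */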
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	d_add(dentry, inode);
	return NULL;
}

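/*
 * Fill a page with uncompressed data.  A file's data area starts with a
 * table of 32-bit block pointers, one per PAGE_CACHE_SIZE block, each
 * recording where that compressed block ends; block i therefore starts
 * where block i-1 ended (or right after the table for block 0), and a
 * zero-length block is a hole.
 */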
static int cramfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index*4;
		u32 start_offset, compr_len;

		start_offset = OFFSET(inode) + maxblock*4;
		mutex_lock(&read_mutex);
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4,
							    4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) -
			     start_offset);
		mutex_unlock(&read_mutex);

		if (compr_len == 0)
			; /* hole */
		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
			pr_err("cramfs: bad compressed blocksize %u\n",
				compr_len);
			goto err;
		} else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_CACHE_SIZE,
				 cramfs_read(sb, start_offset, compr_len),
				 compr_len);
			mutex_unlock(&read_mutex);
			if (unlikely(bytes_filled < 0))
				goto err;
		}
	}

	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}

static const struct address_space_operations cramfs_aops = {
	.readpage = cramfs_readpage
};

/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= cramfs_readdir,
};

static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup		= cramfs_lookup,
};

static const struct super_operations cramfs_ops = {
	.put_super	= cramfs_put_super,
	.remount_fs	= cramfs_remount,
	.statfs		= cramfs_statfs,
};

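/* cramfs always lives on a block device, hence mount_bdev() and FS_REQUIRES_DEV. */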
static struct dentry *cramfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
}

static struct file_system_type cramfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "cramfs",
	.mount		= cramfs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

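/*
 * Initialise the zlib decompression support before registering the
 * filesystem, and tear it down again if registration fails.
 */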
static int __init init_cramfs_fs(void)
{
	int rv;

	rv = cramfs_uncompress_init();
	if (rv < 0)
		return rv;
	rv = register_filesystem(&cramfs_fs_type);
	if (rv < 0)
		cramfs_uncompress_exit();
	return rv;
}

static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}

module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");