X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=fs%2Fbuffer.c;h=4342ab0ad99a90639e30abceeeba06ce5e4dec88;hb=e827f92355e1eeec2d227d3bd3350d04042a011e;hp=0d6ca7bac6c8b9af3e77bd8cd5dc953992592419;hpb=1ebbe2b20091d306453a5cf480a87e6cd28ae76f;p=deliverable%2Flinux.git

diff --git a/fs/buffer.c b/fs/buffer.c
index 0d6ca7bac6c8..4342ab0ad99a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -160,12 +160,7 @@ int sync_blockdev(struct block_device *bdev)
 }
 EXPORT_SYMBOL(sync_blockdev);
 
-/*
- * Write out and wait upon all dirty data associated with this
- * superblock. Filesystem data as well as the underlying block
- * device. Takes the superblock lock.
- */
-int fsync_super(struct super_block *sb)
+static void __fsync_super(struct super_block *sb)
 {
 	sync_inodes_sb(sb, 0);
 	DQUOT_SYNC(sb);
@@ -177,7 +172,16 @@ int fsync_super(struct super_block *sb)
 		sb->s_op->sync_fs(sb, 1);
 	sync_blockdev(sb->s_bdev);
 	sync_inodes_sb(sb, 1);
+}
 
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int fsync_super(struct super_block *sb)
+{
+	__fsync_super(sb);
 	return sync_blockdev(sb->s_bdev);
 }
 
@@ -216,19 +220,7 @@ struct super_block *freeze_bdev(struct block_device *bdev)
 		sb->s_frozen = SB_FREEZE_WRITE;
 		smp_wmb();
 
-		sync_inodes_sb(sb, 0);
-		DQUOT_SYNC(sb);
-
-		lock_super(sb);
-		if (sb->s_dirt && sb->s_op->write_super)
-			sb->s_op->write_super(sb);
-		unlock_super(sb);
-
-		if (sb->s_op->sync_fs)
-			sb->s_op->sync_fs(sb, 1);
-
-		sync_blockdev(sb->s_bdev);
-		sync_inodes_sb(sb, 1);
+		__fsync_super(sb);
 
 		sb->s_frozen = SB_FREEZE_TRANS;
 		smp_wmb();
@@ -327,31 +319,24 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
 	return ret;
 }
 
-static long do_fsync(unsigned int fd, int datasync)
+long do_fsync(struct file *file, int datasync)
 {
-	struct file * file;
-	struct address_space *mapping;
-	int ret, err;
-
-	ret = -EBADF;
-	file = fget(fd);
-	if (!file)
-		goto out;
+	int ret;
+	int err;
+	struct address_space *mapping = file->f_mapping;
 
-	ret = -EINVAL;
 	if (!file->f_op || !file->f_op->fsync) {
 		/* Why? We can still call filemap_fdatawrite */
-		goto out_putf;
+		ret = -EINVAL;
+		goto out;
 	}
 
-	mapping = file->f_mapping;
-
 	current->flags |= PF_SYNCWRITE;
 	ret = filemap_fdatawrite(mapping);
 
 	/*
-	 * We need to protect against concurrent writers,
-	 * which could cause livelocks in fsync_buffers_list
+	 * We need to protect against concurrent writers, which could cause
+	 * livelocks in fsync_buffers_list().
 	 */
 	mutex_lock(&mapping->host->i_mutex);
 	err = file->f_op->fsync(file, file->f_dentry, datasync);
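The hunks above are two refactorings: the body of fsync_super() is hoisted into a static
__fsync_super() helper so freeze_bdev() can reuse it instead of carrying an open-coded copy,
and do_fsync() is changed to take a struct file * directly so callers that already hold a file
reference can skip the fd lookup. A minimal user-space sketch of the same extract-helper
pattern, with hypothetical names (an illustration, not the kernel code itself):

#include <stdio.h>

struct superblock { int dirty; };

/* Extracted helper: the shared write-out sequence, no return value. */
static void fsync_helper(struct superblock *sb)
{
	if (sb->dirty) {
		printf("flushing superblock\n");
		sb->dirty = 0;
	}
}

/*
 * Original entry point keeps its signature; only the final step produces
 * the return value, the way fsync_super() returns the status of its second
 * sync_blockdev() call.
 */
static int fsync_entry(struct superblock *sb)
{
	fsync_helper(sb);
	return 0;	/* status of the final flush */
}

/* A second caller reuses the helper, as freeze_bdev() does in the patch. */
static void freeze_entry(struct superblock *sb)
{
	fsync_helper(sb);
	printf("frozen\n");
}

int main(void)
{
	struct superblock sb = { .dirty = 1 };
	fsync_entry(&sb);
	freeze_entry(&sb);
	return 0;
}

The payoff is the one visible in the freeze_bdev() hunk below the extraction: thirteen
duplicated lines collapse into a single call, and future changes to the flush sequence
happen in one place.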
@@ -362,21 +347,31 @@ static long do_fsync(unsigned int fd, int datasync)
 	if (!ret)
 		ret = err;
 	current->flags &= ~PF_SYNCWRITE;
-
-out_putf:
-	fput(file);
 out:
 	return ret;
 }
 
+static long __do_fsync(unsigned int fd, int datasync)
+{
+	struct file *file;
+	int ret = -EBADF;
+
+	file = fget(fd);
+	if (file) {
+		ret = do_fsync(file, datasync);
+		fput(file);
+	}
+	return ret;
+}
+
 asmlinkage long sys_fsync(unsigned int fd)
 {
-	return do_fsync(fd, 0);
+	return __do_fsync(fd, 0);
 }
 
 asmlinkage long sys_fdatasync(unsigned int fd)
 {
-	return do_fsync(fd, 1);
+	return __do_fsync(fd, 1);
 }
 
 /*
@@ -801,8 +796,7 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 	if (!mapping->assoc_mapping) {
 		mapping->assoc_mapping = buffer_mapping;
 	} else {
-		if (mapping->assoc_mapping != buffer_mapping)
-			BUG();
+		BUG_ON(mapping->assoc_mapping != buffer_mapping);
 	}
 	if (list_empty(&bh->b_assoc_buffers)) {
 		spin_lock(&buffer_mapping->private_lock);
@@ -865,8 +859,8 @@ int __set_page_dirty_buffers(struct page *page)
 		}
 		write_unlock_irq(&mapping->tree_lock);
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+		return 1;
 	}
-
 	return 0;
 }
 
 EXPORT_SYMBOL(__set_page_dirty_buffers);
@@ -1119,8 +1113,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	if (!page)
 		return NULL;
 
-	if (!PageLocked(page))
-		BUG();
+	BUG_ON(!PageLocked(page));
 
 	if (page_has_buffers(page)) {
 		bh = page_buffers(page);
@@ -1527,8 +1520,7 @@ void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
 	bh->b_page = page;
-	if (offset >= PAGE_SIZE)
-		BUG();
+	BUG_ON(offset >= PAGE_SIZE);
 	if (PageHighMem(page))
 		/*
 		 * This catches illegal uses and preserves the offset:
@@ -3078,7 +3070,7 @@ static void recalc_bh_state(void)
 	if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
 		return;
 	__get_cpu_var(bh_accounting).ratelimit = 0;
-	for_each_cpu(i)
+	for_each_online_cpu(i)
 		tot += per_cpu(bh_accounting, i).nr;
 	buffer_heads_over_limit = (tot > max_buffer_heads);
 }
@@ -3127,6 +3119,9 @@ static void buffer_exit_cpu(int cpu)
 		brelse(b->bhs[i]);
 		b->bhs[i] = NULL;
 	}
+	get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+	per_cpu(bh_accounting, cpu).nr = 0;
+	put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
@@ -3143,8 +3138,11 @@ void __init buffer_init(void)
 	int nrpages;
 
 	bh_cachep = kmem_cache_create("buffer_head",
-			sizeof(struct buffer_head), 0,
-			SLAB_RECLAIM_ACCOUNT|SLAB_PANIC, init_buffer_head, NULL);
+				sizeof(struct buffer_head), 0,
+				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
+				SLAB_MEM_SPREAD),
+				init_buffer_head,
+				NULL);
 
 	/*
 	 * Limit the bh occupancy to 10% of ZONE_NORMAL
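The last three hunks belong together as a CPU-hotplug fix: recalc_bh_state() switches from
for_each_cpu() to for_each_online_cpu(), so a hot-unplugged CPU's bh_accounting.nr would
silently vanish from the buffer-head total unless buffer_exit_cpu() first folds the departing
CPU's count into a CPU that stays online. A user-space sketch of that hand-off, with a plain
array standing in for the kernel's per-CPU variables (all names here are illustrative, not the
kernel API):

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for per_cpu(bh_accounting, cpu).nr. */
static long bh_nr[NR_CPUS];
static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };

/* Like recalc_bh_state() after the patch: sum only online CPUs. */
static long total_buffer_heads(void)
{
	long tot = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online[cpu])
			tot += bh_nr[cpu];
	return tot;
}

/*
 * Like buffer_exit_cpu(): before the CPU leaves the online mask,
 * migrate its count to a CPU that remains online and zero the source.
 */
static void offline_cpu(int dead_cpu, int this_cpu)
{
	bh_nr[this_cpu] += bh_nr[dead_cpu];
	bh_nr[dead_cpu] = 0;
	cpu_online[dead_cpu] = 0;
}

int main(void)
{
	bh_nr[0] = 10; bh_nr[1] = 5; bh_nr[2] = 7; bh_nr[3] = 3;

	printf("total before unplug: %ld\n", total_buffer_heads()); /* 25 */
	offline_cpu(3, 0);
	printf("total after unplug:  %ld\n", total_buffer_heads()); /* still 25 */
	return 0;
}

Without the migration step, the sketch's second total would read 22: the counts themselves
are conserved in the kernel (the buffer heads still exist), so the accounting must follow
them, which is exactly what the three added lines in buffer_exit_cpu() do.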