GFS2: Fix bug trap and journaled data fsync
fs/gfs2/file.c (deliverable/linux.git)
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/pagemap.h>
15 #include <linux/uio.h>
16 #include <linux/blkdev.h>
17 #include <linux/mm.h>
18 #include <linux/mount.h>
19 #include <linux/fs.h>
20 #include <linux/gfs2_ondisk.h>
21 #include <linux/ext2_fs.h>
22 #include <linux/falloc.h>
23 #include <linux/swap.h>
24 #include <linux/crc32.h>
25 #include <linux/writeback.h>
26 #include <asm/uaccess.h>
27 #include <linux/dlm.h>
28 #include <linux/dlm_plock.h>
29
30 #include "gfs2.h"
31 #include "incore.h"
32 #include "bmap.h"
33 #include "dir.h"
34 #include "glock.h"
35 #include "glops.h"
36 #include "inode.h"
37 #include "log.h"
38 #include "meta_io.h"
39 #include "quota.h"
40 #include "rgrp.h"
41 #include "trans.h"
42 #include "util.h"
43
44 /**
45 * gfs2_llseek - seek to a location in a file
46 * @file: the file
47 * @offset: the offset
48 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
49 *
50 * SEEK_END requires the glock for the file because it references the
51 * file's size.
52 *
53 * Returns: The new offset, or errno
54 */
55
56 static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
57 {
58 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
59 struct gfs2_holder i_gh;
60 loff_t error;
61
62 	if (origin == SEEK_END) {
63 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
64 &i_gh);
65 if (!error) {
66 error = generic_file_llseek_unlocked(file, offset, origin);
67 gfs2_glock_dq_uninit(&i_gh);
68 }
69 } else
70 error = generic_file_llseek_unlocked(file, offset, origin);
71
72 return error;
73 }
74
75 /**
76 * gfs2_readdir - Read directory entries from a directory
77 * @file: The directory to read from
78 * @dirent: Buffer for dirents
79 * @filldir: Function used to do the copying
80 *
81 * Returns: errno
82 */
83
84 static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
85 {
86 struct inode *dir = file->f_mapping->host;
87 struct gfs2_inode *dip = GFS2_I(dir);
88 struct gfs2_holder d_gh;
89 u64 offset = file->f_pos;
90 int error;
91
92 gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
93 error = gfs2_glock_nq(&d_gh);
94 if (error) {
95 gfs2_holder_uninit(&d_gh);
96 return error;
97 }
98
99 error = gfs2_dir_read(dir, &offset, dirent, filldir);
100
101 gfs2_glock_dq_uninit(&d_gh);
102
103 file->f_pos = offset;
104
105 return error;
106 }
107
108 /**
109  * fsflags_cvt - convert flag bit values via a lookup table
110  * @table: A table of 32 u32 flags
111  * @val: a 32-bit value to convert
112 *
113 * This function can be used to convert between fsflags values and
114 * GFS2's own flags values.
115 *
116 * Returns: the converted flags
117 */
118 static u32 fsflags_cvt(const u32 *table, u32 val)
119 {
120 u32 res = 0;
121 	while (val) {
122 if (val & 1)
123 res |= *table;
124 table++;
125 val >>= 1;
126 }
127 return res;
128 }
129
130 static const u32 fsflags_to_gfs2[32] = {
131 [3] = GFS2_DIF_SYNC,
132 [4] = GFS2_DIF_IMMUTABLE,
133 [5] = GFS2_DIF_APPENDONLY,
134 [7] = GFS2_DIF_NOATIME,
135 [12] = GFS2_DIF_EXHASH,
136 [14] = GFS2_DIF_INHERIT_JDATA,
137 };
138
139 static const u32 gfs2_to_fsflags[32] = {
140 [gfs2fl_Sync] = FS_SYNC_FL,
141 [gfs2fl_Immutable] = FS_IMMUTABLE_FL,
142 [gfs2fl_AppendOnly] = FS_APPEND_FL,
143 [gfs2fl_NoAtime] = FS_NOATIME_FL,
144 [gfs2fl_ExHash] = FS_INDEX_FL,
145 [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
146 };
147
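/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * showing how fsflags_cvt() and the two tables above convert between the
 * FS_*_FL bits from <linux/fs.h> and the GFS2_DIF_* on-disk bits. Each set
 * bit in the input selects one table entry, which is exactly how
 * gfs2_get_flags() and gfs2_set_flags() below use it.
 */
static void __maybe_unused fsflags_cvt_example(void)
{
	u32 fsflags = FS_SYNC_FL | FS_NOATIME_FL;	/* bits 3 and 7 */
	u32 diskflags = fsflags_cvt(fsflags_to_gfs2, fsflags);

	/* bits 3 and 7 map to the GFS2 sync and noatime disk flags */
	WARN_ON(diskflags != (GFS2_DIF_SYNC | GFS2_DIF_NOATIME));

	/* the reverse table converts disk flags back to fsflags */
	WARN_ON(fsflags_cvt(gfs2_to_fsflags, diskflags) != fsflags);
}
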
148 static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
149 {
150 struct inode *inode = filp->f_path.dentry->d_inode;
151 struct gfs2_inode *ip = GFS2_I(inode);
152 struct gfs2_holder gh;
153 int error;
154 u32 fsflags;
155
156 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
157 error = gfs2_glock_nq(&gh);
158 if (error)
159 return error;
160
161 fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
162 if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
163 fsflags |= FS_JOURNAL_DATA_FL;
164 if (put_user(fsflags, ptr))
165 error = -EFAULT;
166
167 gfs2_glock_dq(&gh);
168 gfs2_holder_uninit(&gh);
169 return error;
170 }
171
172 void gfs2_set_inode_flags(struct inode *inode)
173 {
174 struct gfs2_inode *ip = GFS2_I(inode);
175 unsigned int flags = inode->i_flags;
176
177 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
178 if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
179 		flags |= S_NOSEC;
180 if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
181 flags |= S_IMMUTABLE;
182 if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
183 flags |= S_APPEND;
184 if (ip->i_diskflags & GFS2_DIF_NOATIME)
185 flags |= S_NOATIME;
186 if (ip->i_diskflags & GFS2_DIF_SYNC)
187 flags |= S_SYNC;
188 inode->i_flags = flags;
189 }
190
191 /* Flags that can be set by user space */
192 #define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
193 GFS2_DIF_IMMUTABLE| \
194 GFS2_DIF_APPENDONLY| \
195 GFS2_DIF_NOATIME| \
196 GFS2_DIF_SYNC| \
197 GFS2_DIF_SYSTEM| \
198 GFS2_DIF_INHERIT_JDATA)
199
200 /**
201  * do_gfs2_set_flags - set flags on an inode
202  * @filp: The file, used to obtain the inode
203  * @reqflags: The flags to set
204 * @mask: Indicates which flags are valid
205 *
206 */
207 static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
208 {
209 struct inode *inode = filp->f_path.dentry->d_inode;
210 struct gfs2_inode *ip = GFS2_I(inode);
211 struct gfs2_sbd *sdp = GFS2_SB(inode);
212 struct buffer_head *bh;
213 struct gfs2_holder gh;
214 int error;
215 u32 new_flags, flags;
216
217 error = mnt_want_write(filp->f_path.mnt);
218 if (error)
219 return error;
220
221 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
222 if (error)
223 goto out_drop_write;
224
225 error = -EACCES;
226 if (!inode_owner_or_capable(inode))
227 goto out;
228
229 error = 0;
230 flags = ip->i_diskflags;
231 new_flags = (flags & ~mask) | (reqflags & mask);
232 if ((new_flags ^ flags) == 0)
233 goto out;
234
235 error = -EINVAL;
236 if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
237 goto out;
238
239 error = -EPERM;
240 if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
241 goto out;
242 if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
243 goto out;
244 if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
245 !capable(CAP_LINUX_IMMUTABLE))
246 goto out;
247 if (!IS_IMMUTABLE(inode)) {
248 error = gfs2_permission(inode, MAY_WRITE);
249 if (error)
250 goto out;
251 }
252 if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
253 if (flags & GFS2_DIF_JDATA)
254 gfs2_log_flush(sdp, ip->i_gl);
255 error = filemap_fdatawrite(inode->i_mapping);
256 if (error)
257 goto out;
258 error = filemap_fdatawait(inode->i_mapping);
259 if (error)
260 goto out;
261 }
262 error = gfs2_trans_begin(sdp, RES_DINODE, 0);
263 if (error)
264 goto out;
265 error = gfs2_meta_inode_buffer(ip, &bh);
266 if (error)
267 goto out_trans_end;
268 gfs2_trans_add_bh(ip->i_gl, bh, 1);
269 ip->i_diskflags = new_flags;
270 gfs2_dinode_out(ip, bh->b_data);
271 brelse(bh);
272 gfs2_set_inode_flags(inode);
273 gfs2_set_aops(inode);
274 out_trans_end:
275 gfs2_trans_end(sdp);
276 out:
277 gfs2_glock_dq_uninit(&gh);
278 out_drop_write:
279 mnt_drop_write(filp->f_path.mnt);
280 return error;
281 }
282
283 static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
284 {
285 struct inode *inode = filp->f_path.dentry->d_inode;
286 u32 fsflags, gfsflags;
287
288 if (get_user(fsflags, ptr))
289 return -EFAULT;
290
291 gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
292 if (!S_ISDIR(inode->i_mode)) {
293 if (gfsflags & GFS2_DIF_INHERIT_JDATA)
294 gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
295 return do_gfs2_set_flags(filp, gfsflags, ~0);
296 }
297 return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
298 }
299
300 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
301 {
302 	switch (cmd) {
303 case FS_IOC_GETFLAGS:
304 return gfs2_get_flags(filp, (u32 __user *)arg);
305 case FS_IOC_SETFLAGS:
306 return gfs2_set_flags(filp, (u32 __user *)arg);
307 }
308 return -ENOTTY;
309 }
310
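/*
 * Illustrative user-space sketch (not part of this kernel source): how an
 * application drives the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls handled by
 * gfs2_ioctl() above to request journaled data on a file. The path is
 * hypothetical and error handling is minimal.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int flags;			/* the kernel copies a 32-bit value */
	int fd = open("/mnt/gfs2/datafile", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
		return 1;
	flags |= FS_JOURNAL_DATA_FL;		/* converted to GFS2_DIF_JDATA above */
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
		return 1;
	close(fd);
	return 0;
}
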
311 /**
312 * gfs2_allocate_page_backing - Use bmap to allocate blocks
313 * @page: The (locked) page to allocate backing for
314 *
315 * We try to allocate all the blocks required for the page in
316 * one go. This might fail for various reasons, so we keep
317 * trying until all the blocks to back this page are allocated.
318  * If some of the blocks are already allocated, that's OK too.
319 */
320
321 static int gfs2_allocate_page_backing(struct page *page)
322 {
323 struct inode *inode = page->mapping->host;
324 struct buffer_head bh;
325 unsigned long size = PAGE_CACHE_SIZE;
326 u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
327
328 do {
329 bh.b_state = 0;
330 bh.b_size = size;
331 gfs2_block_map(inode, lblock, &bh, 1);
332 if (!buffer_mapped(&bh))
333 return -EIO;
334 size -= bh.b_size;
335 lblock += (bh.b_size >> inode->i_blkbits);
336 	} while (size > 0);
337 return 0;
338 }
339
340 /**
341 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
342 * @vma: The virtual memory area
343 * @page: The page which is about to become writable
344 *
345 * When the page becomes writable, we need to ensure that we have
346 * blocks allocated on disk to back that page.
347 */
348
349 static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
350 {
351 struct page *page = vmf->page;
352 struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
353 struct gfs2_inode *ip = GFS2_I(inode);
354 struct gfs2_sbd *sdp = GFS2_SB(inode);
355 unsigned long last_index;
356 u64 pos = page->index << PAGE_CACHE_SHIFT;
357 unsigned int data_blocks, ind_blocks, rblocks;
358 struct gfs2_holder gh;
359 struct gfs2_alloc *al;
360 int ret;
361
362 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
363 ret = gfs2_glock_nq(&gh);
364 if (ret)
365 goto out;
366
367 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
368 set_bit(GIF_SW_PAGED, &ip->i_flags);
369
370 if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
371 goto out_unlock;
372 ret = -ENOMEM;
373 al = gfs2_alloc_get(ip);
374 if (al == NULL)
375 goto out_unlock;
376
377 ret = gfs2_quota_lock_check(ip);
378 if (ret)
379 goto out_alloc_put;
380 gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
381 al->al_requested = data_blocks + ind_blocks;
382 ret = gfs2_inplace_reserve(ip);
383 if (ret)
384 goto out_quota_unlock;
385
386 rblocks = RES_DINODE + ind_blocks;
387 if (gfs2_is_jdata(ip))
388 rblocks += data_blocks ? data_blocks : 1;
389 if (ind_blocks || data_blocks) {
390 rblocks += RES_STATFS + RES_QUOTA;
391 rblocks += gfs2_rg_blocks(al);
392 }
393 ret = gfs2_trans_begin(sdp, rblocks, 0);
394 if (ret)
395 goto out_trans_fail;
396
397 lock_page(page);
398 ret = -EINVAL;
399 last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
400 if (page->index > last_index)
401 goto out_unlock_page;
402 ret = 0;
403 if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
404 goto out_unlock_page;
405 if (gfs2_is_stuffed(ip)) {
406 ret = gfs2_unstuff_dinode(ip, page);
407 if (ret)
408 goto out_unlock_page;
409 }
410 ret = gfs2_allocate_page_backing(page);
411
412 out_unlock_page:
413 unlock_page(page);
414 gfs2_trans_end(sdp);
415 out_trans_fail:
416 gfs2_inplace_release(ip);
417 out_quota_unlock:
418 gfs2_quota_unlock(ip);
419 out_alloc_put:
420 gfs2_alloc_put(ip);
421 out_unlock:
422 gfs2_glock_dq(&gh);
423 out:
424 gfs2_holder_uninit(&gh);
425 if (ret == -ENOMEM)
426 ret = VM_FAULT_OOM;
427 else if (ret)
428 ret = VM_FAULT_SIGBUS;
429 return ret;
430 }
431
432 static const struct vm_operations_struct gfs2_vm_ops = {
433 .fault = filemap_fault,
434 .page_mkwrite = gfs2_page_mkwrite,
435 };
436
437 /**
438  * gfs2_mmap - set up a memory mapping for a file
439 * @file: The file to map
440  * @vma: The VMA which describes the mapping
441 *
442 * There is no need to get a lock here unless we should be updating
443 * atime. We ignore any locking errors since the only consequence is
444 * a missed atime update (which will just be deferred until later).
445 *
446 * Returns: 0
447 */
448
449 static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
450 {
451 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
452
453 if (!(file->f_flags & O_NOATIME) &&
454 !IS_NOATIME(&ip->i_inode)) {
455 struct gfs2_holder i_gh;
456 int error;
457
458 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
459 error = gfs2_glock_nq(&i_gh);
460 if (error == 0) {
461 file_accessed(file);
462 gfs2_glock_dq(&i_gh);
463 }
464 gfs2_holder_uninit(&i_gh);
465 if (error)
466 return error;
467 }
468 vma->vm_ops = &gfs2_vm_ops;
469 vma->vm_flags |= VM_CAN_NONLINEAR;
470
471 return 0;
472 }
473
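/*
 * Illustrative user-space sketch (not part of this kernel source): a shared
 * mapping whose first write faults through gfs2_page_mkwrite() above, which
 * reserves quota/blocks and allocates the on-disk backing before the store
 * is allowed to proceed. The path is hypothetical.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	char *p;
	int fd = open("/mnt/gfs2/mapped", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 'x', 4096);		/* first store triggers ->page_mkwrite */
	msync(p, 4096, MS_SYNC);
	munmap(p, 4096);
	close(fd);
	return 0;
}
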
474 /**
475 * gfs2_open - open a file
476 * @inode: the inode to open
477 * @file: the struct file for this opening
478 *
479 * Returns: errno
480 */
481
482 static int gfs2_open(struct inode *inode, struct file *file)
483 {
484 struct gfs2_inode *ip = GFS2_I(inode);
485 struct gfs2_holder i_gh;
486 struct gfs2_file *fp;
487 int error;
488
489 fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
490 if (!fp)
491 return -ENOMEM;
492
493 mutex_init(&fp->f_fl_mutex);
494
495 gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
496 file->private_data = fp;
497
498 if (S_ISREG(ip->i_inode.i_mode)) {
499 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
500 &i_gh);
501 if (error)
502 goto fail;
503
504 if (!(file->f_flags & O_LARGEFILE) &&
505 i_size_read(inode) > MAX_NON_LFS) {
506 error = -EOVERFLOW;
507 goto fail_gunlock;
508 }
509
510 gfs2_glock_dq_uninit(&i_gh);
511 }
512
513 return 0;
514
515 fail_gunlock:
516 gfs2_glock_dq_uninit(&i_gh);
517 fail:
518 file->private_data = NULL;
519 kfree(fp);
520 return error;
521 }
522
523 /**
524 * gfs2_close - called to close a struct file
525 * @inode: the inode the struct file belongs to
526 * @file: the struct file being closed
527 *
528 * Returns: errno
529 */
530
531 static int gfs2_close(struct inode *inode, struct file *file)
532 {
533 struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
534 struct gfs2_file *fp;
535
536 fp = file->private_data;
537 file->private_data = NULL;
538
539 if (gfs2_assert_warn(sdp, fp))
540 return -EIO;
541
542 kfree(fp);
543
544 return 0;
545 }
546
547 /**
548 * gfs2_fsync - sync the dirty data for a file (across the cluster)
549 * @file: the file that points to the dentry
550 * @start: the start position in the file to sync
551 * @end: the end position in the file to sync
552 * @datasync: set if we can ignore timestamp changes
553 *
554 * We split the data flushing here so that we don't wait for the data
555 * until after we've also sent the metadata to disk. Note that for
556 * data=ordered, we will write & wait for the data at the log flush
557 * stage anyway, so this is unlikely to make much of a difference
558 * except in the data=writeback case.
559 *
560 * If the fdatawrite fails due to any reason except -EIO, we will
561 * continue the remainder of the fsync, although we'll still report
562 * the error at the end. This is to match filemap_write_and_wait_range()
563 * behaviour.
564 *
565 * Returns: errno
566 */
567
568 static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
569 int datasync)
570 {
571 struct address_space *mapping = file->f_mapping;
572 struct inode *inode = mapping->host;
573 int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
574 struct gfs2_inode *ip = GFS2_I(inode);
575 	int ret = 0, ret1 = 0;
576
577 if (mapping->nrpages) {
578 ret1 = filemap_fdatawrite_range(mapping, start, end);
579 if (ret1 == -EIO)
580 return ret1;
581 }
582
583 if (datasync)
584 sync_state &= ~I_DIRTY_SYNC;
585
586 if (sync_state) {
587 mutex_lock(&inode->i_mutex);
588 ret = sync_inode_metadata(inode, 1);
589 if (ret) {
590 mutex_unlock(&inode->i_mutex);
591 return ret;
592 }
593 if (gfs2_is_jdata(ip))
594 filemap_write_and_wait(mapping);
595 gfs2_ail_flush(ip->i_gl);
596 mutex_unlock(&inode->i_mutex);
597 }
598
599 if (mapping->nrpages)
600 ret = filemap_fdatawait_range(mapping, start, end);
601
602 return ret ? ret : ret1;
603 }
604
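/*
 * Illustrative user-space sketch (not part of this kernel source): appending
 * a record and calling fdatasync(), which reaches gfs2_fsync() above with
 * datasync == 1 so that pure timestamp dirtiness (I_DIRTY_SYNC) does not
 * force a metadata sync; fsync() takes the same path with datasync == 0.
 * The path is hypothetical.
 */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	static const char rec[] = "record\n";
	int fd = open("/mnt/gfs2/app.log", O_WRONLY | O_APPEND | O_CREAT, 0644);

	if (fd < 0 || write(fd, rec, sizeof(rec) - 1) < 0)
		return 1;
	if (fdatasync(fd) < 0)
		return 1;
	close(fd);
	return 0;
}
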
605 /**
606 * gfs2_file_aio_write - Perform a write to a file
607 * @iocb: The io context
608 * @iov: The data to write
609 * @nr_segs: Number of @iov segments
610 * @pos: The file position
611 *
612 * We have to do a lock/unlock here to refresh the inode size for
613  * O_APPEND writes, otherwise we can end up writing at the wrong
614 * offset. There is still a race, but provided the app is using its
615 * own file locking, this will make O_APPEND work as expected.
616 *
617 */
618
619 static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
620 unsigned long nr_segs, loff_t pos)
621 {
622 struct file *file = iocb->ki_filp;
623
624 if (file->f_flags & O_APPEND) {
625 struct dentry *dentry = file->f_dentry;
626 struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
627 struct gfs2_holder gh;
628 int ret;
629
630 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
631 if (ret)
632 return ret;
633 gfs2_glock_dq_uninit(&gh);
634 }
635
636 return generic_file_aio_write(iocb, iov, nr_segs, pos);
637 }
638
639 static int empty_write_end(struct page *page, unsigned from,
640 unsigned to, int mode)
641 {
642 struct inode *inode = page->mapping->host;
643 struct gfs2_inode *ip = GFS2_I(inode);
644 struct buffer_head *bh;
645 unsigned offset, blksize = 1 << inode->i_blkbits;
646 pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
647
648 zero_user(page, from, to-from);
649 mark_page_accessed(page);
650
651 if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
652 if (!gfs2_is_writeback(ip))
653 gfs2_page_add_databufs(ip, page, from, to);
654
655 block_commit_write(page, from, to);
656 return 0;
657 }
658
659 offset = 0;
660 bh = page_buffers(page);
661 while (offset < to) {
662 if (offset >= from) {
663 set_buffer_uptodate(bh);
664 mark_buffer_dirty(bh);
665 clear_buffer_new(bh);
666 write_dirty_buffer(bh, WRITE);
667 }
668 offset += blksize;
669 bh = bh->b_this_page;
670 }
671
672 offset = 0;
673 bh = page_buffers(page);
674 while (offset < to) {
675 if (offset >= from) {
676 wait_on_buffer(bh);
677 if (!buffer_uptodate(bh))
678 return -EIO;
679 }
680 offset += blksize;
681 bh = bh->b_this_page;
682 }
683 return 0;
684 }
685
686 static int needs_empty_write(sector_t block, struct inode *inode)
687 {
688 int error;
689 struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
690
691 bh_map.b_size = 1 << inode->i_blkbits;
692 error = gfs2_block_map(inode, block, &bh_map, 0);
693 if (unlikely(error))
694 return error;
695 return !buffer_mapped(&bh_map);
696 }
697
698 static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
699 int mode)
700 {
701 struct inode *inode = page->mapping->host;
702 unsigned start, end, next, blksize;
703 sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
704 int ret;
705
706 blksize = 1 << inode->i_blkbits;
707 next = end = 0;
708 while (next < from) {
709 next += blksize;
710 block++;
711 }
712 start = next;
713 do {
714 next += blksize;
715 ret = needs_empty_write(block, inode);
716 if (unlikely(ret < 0))
717 return ret;
718 if (ret == 0) {
719 if (end) {
720 ret = __block_write_begin(page, start, end - start,
721 gfs2_block_map);
722 if (unlikely(ret))
723 return ret;
724 ret = empty_write_end(page, start, end, mode);
725 if (unlikely(ret))
726 return ret;
727 end = 0;
728 }
729 start = next;
730 }
731 else
732 end = next;
733 block++;
734 } while (next < to);
735
736 if (end) {
737 ret = __block_write_begin(page, start, end - start, gfs2_block_map);
738 if (unlikely(ret))
739 return ret;
740 ret = empty_write_end(page, start, end, mode);
741 if (unlikely(ret))
742 return ret;
743 }
744
745 return 0;
746 }
747
748 static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
749 int mode)
750 {
751 struct gfs2_inode *ip = GFS2_I(inode);
752 struct buffer_head *dibh;
753 int error;
754 u64 start = offset >> PAGE_CACHE_SHIFT;
755 unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
756 u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
757 pgoff_t curr;
758 struct page *page;
759 unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
760 unsigned int from, to;
761
762 if (!end_offset)
763 end_offset = PAGE_CACHE_SIZE;
764
765 error = gfs2_meta_inode_buffer(ip, &dibh);
766 if (unlikely(error))
767 goto out;
768
769 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
770
771 if (gfs2_is_stuffed(ip)) {
772 error = gfs2_unstuff_dinode(ip, NULL);
773 if (unlikely(error))
774 goto out;
775 }
776
777 curr = start;
778 offset = start << PAGE_CACHE_SHIFT;
779 from = start_offset;
780 to = PAGE_CACHE_SIZE;
781 while (curr <= end) {
782 page = grab_cache_page_write_begin(inode->i_mapping, curr,
783 AOP_FLAG_NOFS);
784 if (unlikely(!page)) {
785 error = -ENOMEM;
786 goto out;
787 }
788
789 if (curr == end)
790 to = end_offset;
791 error = write_empty_blocks(page, from, to, mode);
792 if (!error && offset + to > inode->i_size &&
793 !(mode & FALLOC_FL_KEEP_SIZE)) {
794 i_size_write(inode, offset + to);
795 }
796 unlock_page(page);
797 page_cache_release(page);
798 if (error)
799 goto out;
800 curr++;
801 offset += PAGE_CACHE_SIZE;
802 from = 0;
803 }
804
805 gfs2_dinode_out(ip, dibh->b_data);
806 mark_inode_dirty(inode);
807
808 brelse(dibh);
809
810 out:
811 return error;
812 }
813
814 static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
815 unsigned int *data_blocks, unsigned int *ind_blocks)
816 {
817 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
818 unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
819 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
820
821 for (tmp = max_data; tmp > sdp->sd_diptrs;) {
822 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
823 max_data -= tmp;
824 }
825 	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
826 so it might end up with fewer data blocks */
827 if (max_data <= *data_blocks)
828 return;
829 *data_blocks = max_data;
830 *ind_blocks = max_blocks - max_data;
831 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
832 if (*len > max) {
833 *len = max;
834 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
835 }
836 }
837
838 static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
839 loff_t len)
840 {
841 struct inode *inode = file->f_path.dentry->d_inode;
842 struct gfs2_sbd *sdp = GFS2_SB(inode);
843 struct gfs2_inode *ip = GFS2_I(inode);
844 unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
845 loff_t bytes, max_bytes;
846 struct gfs2_alloc *al;
847 int error;
848 loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
849 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
850 next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
851
852 /* We only support the FALLOC_FL_KEEP_SIZE mode */
853 if (mode & ~FALLOC_FL_KEEP_SIZE)
854 return -EOPNOTSUPP;
855
856 offset &= bsize_mask;
857
858 len = next - offset;
859 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
860 if (!bytes)
861 bytes = UINT_MAX;
862 bytes &= bsize_mask;
863 if (bytes == 0)
864 bytes = sdp->sd_sb.sb_bsize;
865
866 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
867 error = gfs2_glock_nq(&ip->i_gh);
868 if (unlikely(error))
869 goto out_uninit;
870
871 if (!gfs2_write_alloc_required(ip, offset, len))
872 goto out_unlock;
873
874 while (len > 0) {
875 if (len < bytes)
876 bytes = len;
877 al = gfs2_alloc_get(ip);
878 if (!al) {
879 error = -ENOMEM;
880 goto out_unlock;
881 }
882
883 error = gfs2_quota_lock_check(ip);
884 if (error)
885 goto out_alloc_put;
886
887 retry:
888 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
889
890 al->al_requested = data_blocks + ind_blocks;
891 error = gfs2_inplace_reserve(ip);
892 if (error) {
893 if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
894 bytes >>= 1;
895 bytes &= bsize_mask;
896 if (bytes == 0)
897 bytes = sdp->sd_sb.sb_bsize;
898 goto retry;
899 }
900 goto out_qunlock;
901 }
902 max_bytes = bytes;
903 calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
904 al->al_requested = data_blocks + ind_blocks;
905
906 rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
907 RES_RG_HDR + gfs2_rg_blocks(al);
908 if (gfs2_is_jdata(ip))
909 rblocks += data_blocks ? data_blocks : 1;
910
911 error = gfs2_trans_begin(sdp, rblocks,
912 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
913 if (error)
914 goto out_trans_fail;
915
916 error = fallocate_chunk(inode, offset, max_bytes, mode);
917 gfs2_trans_end(sdp);
918
919 if (error)
920 goto out_trans_fail;
921
922 len -= max_bytes;
923 offset += max_bytes;
924 gfs2_inplace_release(ip);
925 gfs2_quota_unlock(ip);
926 gfs2_alloc_put(ip);
927 }
928 goto out_unlock;
929
930 out_trans_fail:
931 gfs2_inplace_release(ip);
932 out_qunlock:
933 gfs2_quota_unlock(ip);
934 out_alloc_put:
935 gfs2_alloc_put(ip);
936 out_unlock:
937 gfs2_glock_dq(&ip->i_gh);
938 out_uninit:
939 gfs2_holder_uninit(&ip->i_gh);
940 return error;
941 }
942
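/*
 * Illustrative user-space sketch (not part of this kernel source):
 * preallocating space through gfs2_fallocate() above. GFS2 here accepts a
 * mode of 0 or FALLOC_FL_KEEP_SIZE and rejects anything else with
 * -EOPNOTSUPP. The path and size are hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/prealloc.dat", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* reserve 100 MiB of blocks without changing i_size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 100 << 20) < 0)
		return 1;
	close(fd);
	return 0;
}
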
943 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
944
945 /**
946 * gfs2_setlease - acquire/release a file lease
947 * @file: the file pointer
948 * @arg: lease type
949 * @fl: file lock
950 *
951 * We don't currently have a way to enforce a lease across the whole
952 * cluster; until we do, disable leases (by just returning -EINVAL),
953 * unless the administrator has requested purely local locking.
954 *
955 * Locking: called under lock_flocks
956 *
957 * Returns: errno
958 */
959
960 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
961 {
962 return -EINVAL;
963 }
964
965 /**
966 * gfs2_lock - acquire/release a posix lock on a file
967 * @file: the file pointer
968 * @cmd: either modify or retrieve lock state, possibly wait
969 * @fl: type and range of lock
970 *
971 * Returns: errno
972 */
973
974 static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
975 {
976 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
977 struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
978 struct lm_lockstruct *ls = &sdp->sd_lockstruct;
979
980 if (!(fl->fl_flags & FL_POSIX))
981 return -ENOLCK;
982 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
983 return -ENOLCK;
984
985 if (cmd == F_CANCELLK) {
986 /* Hack: */
987 cmd = F_SETLK;
988 fl->fl_type = F_UNLCK;
989 }
990 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
991 return -EIO;
992 if (IS_GETLK(cmd))
993 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
994 else if (fl->fl_type == F_UNLCK)
995 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
996 else
997 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
998 }
999
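/*
 * Illustrative user-space sketch (not part of this kernel source): a POSIX
 * byte-range lock taken with fcntl(), which gfs2_lock() above hands to the
 * DLM so the lock is honoured across the whole cluster. The path is
 * hypothetical and error handling is minimal.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd = open("/mnt/gfs2/shared.db", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 4096;			/* lock the first 4096 bytes */
	if (fcntl(fd, F_SETLKW, &fl) < 0)	/* blocks until the DLM grants it */
		return 1;
	/* ... update the locked region ... */
	fl.l_type = F_UNLCK;
	fcntl(fd, F_SETLK, &fl);
	close(fd);
	return 0;
}
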
1000 static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1001 {
1002 struct gfs2_file *fp = file->private_data;
1003 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1004 struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
1005 struct gfs2_glock *gl;
1006 unsigned int state;
1007 int flags;
1008 int error = 0;
1009
1010 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1011 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
1012
1013 mutex_lock(&fp->f_fl_mutex);
1014
1015 gl = fl_gh->gh_gl;
1016 if (gl) {
1017 if (fl_gh->gh_state == state)
1018 goto out;
1019 flock_lock_file_wait(file,
1020 &(struct file_lock){.fl_type = F_UNLCK});
1021 gfs2_glock_dq_wait(fl_gh);
1022 gfs2_holder_reinit(state, flags, fl_gh);
1023 } else {
1024 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1025 &gfs2_flock_glops, CREATE, &gl);
1026 if (error)
1027 goto out;
1028 gfs2_holder_init(gl, state, flags, fl_gh);
1029 gfs2_glock_put(gl);
1030 }
1031 error = gfs2_glock_nq(fl_gh);
1032 if (error) {
1033 gfs2_holder_uninit(fl_gh);
1034 if (error == GLR_TRYFAILED)
1035 error = -EAGAIN;
1036 } else {
1037 error = flock_lock_file_wait(file, fl);
1038 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1039 }
1040
1041 out:
1042 mutex_unlock(&fp->f_fl_mutex);
1043 return error;
1044 }
1045
1046 static void do_unflock(struct file *file, struct file_lock *fl)
1047 {
1048 struct gfs2_file *fp = file->private_data;
1049 struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1050
1051 mutex_lock(&fp->f_fl_mutex);
1052 flock_lock_file_wait(file, fl);
1053 if (fl_gh->gh_gl) {
1054 gfs2_glock_dq_wait(fl_gh);
1055 gfs2_holder_uninit(fl_gh);
1056 }
1057 mutex_unlock(&fp->f_fl_mutex);
1058 }
1059
1060 /**
1061 * gfs2_flock - acquire/release a flock lock on a file
1062 * @file: the file pointer
1063 * @cmd: either modify or retrieve lock state, possibly wait
1064 * @fl: type and range of lock
1065 *
1066 * Returns: errno
1067 */
1068
1069 static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1070 {
1071 if (!(fl->fl_flags & FL_FLOCK))
1072 return -ENOLCK;
1073 if (fl->fl_type & LOCK_MAND)
1074 return -EOPNOTSUPP;
1075
1076 if (fl->fl_type == F_UNLCK) {
1077 do_unflock(file, fl);
1078 return 0;
1079 } else {
1080 return do_flock(file, cmd, fl);
1081 }
1082 }
1083
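/*
 * Illustrative user-space sketch (not part of this kernel source): a
 * whole-file flock() lock, which gfs2_flock() above backs with a flock
 * glock via do_flock()/do_unflock(), so it too is cluster-wide. The path
 * is hypothetical.
 */
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/gfs2/shared.db", O_RDWR);

	if (fd < 0)
		return 1;
	if (flock(fd, LOCK_EX) < 0)	/* LM_ST_EXCLUSIVE in do_flock() */
		return 1;
	/* ... exclusive access to the whole file ... */
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}
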
1084 const struct file_operations gfs2_file_fops = {
1085 .llseek = gfs2_llseek,
1086 .read = do_sync_read,
1087 .aio_read = generic_file_aio_read,
1088 .write = do_sync_write,
1089 .aio_write = gfs2_file_aio_write,
1090 .unlocked_ioctl = gfs2_ioctl,
1091 .mmap = gfs2_mmap,
1092 .open = gfs2_open,
1093 .release = gfs2_close,
1094 .fsync = gfs2_fsync,
1095 .lock = gfs2_lock,
1096 .flock = gfs2_flock,
1097 .splice_read = generic_file_splice_read,
1098 .splice_write = generic_file_splice_write,
1099 .setlease = gfs2_setlease,
1100 .fallocate = gfs2_fallocate,
1101 };
1102
1103 const struct file_operations gfs2_dir_fops = {
1104 .readdir = gfs2_readdir,
1105 .unlocked_ioctl = gfs2_ioctl,
1106 .open = gfs2_open,
1107 .release = gfs2_close,
1108 .fsync = gfs2_fsync,
1109 .lock = gfs2_lock,
1110 .flock = gfs2_flock,
1111 .llseek = default_llseek,
1112 };
1113
1114 #endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1115
1116 const struct file_operations gfs2_file_fops_nolock = {
1117 .llseek = gfs2_llseek,
1118 .read = do_sync_read,
1119 .aio_read = generic_file_aio_read,
1120 .write = do_sync_write,
1121 .aio_write = gfs2_file_aio_write,
1122 .unlocked_ioctl = gfs2_ioctl,
1123 .mmap = gfs2_mmap,
1124 .open = gfs2_open,
1125 .release = gfs2_close,
1126 .fsync = gfs2_fsync,
1127 .splice_read = generic_file_splice_read,
1128 .splice_write = generic_file_splice_write,
1129 .setlease = generic_setlease,
1130 .fallocate = gfs2_fallocate,
1131 };
1132
1133 const struct file_operations gfs2_dir_fops_nolock = {
1134 .readdir = gfs2_readdir,
1135 .unlocked_ioctl = gfs2_ioctl,
1136 .open = gfs2_open,
1137 .release = gfs2_close,
1138 .fsync = gfs2_fsync,
1139 .llseek = default_llseek,
1140 };
1141