ocfs2: update truncate handling of partial clusters
fs/ocfs2/file.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * file.c
5 *
6 * File open, close, extend, truncate
7 *
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
24 */
25
26 #include <linux/capability.h>
27 #include <linux/fs.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37
38 #define MLOG_MASK_PREFIX ML_INODE
39 #include <cluster/masklog.h>
40
41 #include "ocfs2.h"
42
43 #include "alloc.h"
44 #include "aops.h"
45 #include "dir.h"
46 #include "dlmglue.h"
47 #include "extent_map.h"
48 #include "file.h"
49 #include "sysfile.h"
50 #include "inode.h"
51 #include "ioctl.h"
52 #include "journal.h"
53 #include "mmap.h"
54 #include "suballoc.h"
55 #include "super.h"
56
57 #include "buffer_head_io.h"
58
59 static int ocfs2_sync_inode(struct inode *inode)
60 {
61 filemap_fdatawrite(inode->i_mapping);
62 return sync_mapping_buffers(inode->i_mapping);
63 }
64
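/*
 * Open/release pair: ip_open_count tracks local openers so a wipe of the
 * inode by another node can be detected at open time, and
 * OCFS2_INODE_OPEN_DIRECT is kept in sync with whether any O_DIRECT
 * opener remains.
 */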
65 static int ocfs2_file_open(struct inode *inode, struct file *file)
66 {
67 int status;
68 int mode = file->f_flags;
69 struct ocfs2_inode_info *oi = OCFS2_I(inode);
70
71 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
72 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
73
74 spin_lock(&oi->ip_lock);
75
76 /* Check that the inode hasn't been wiped from disk by another
77 * node. If it hasn't then we're safe as long as we hold the
78 * spin lock until our increment of open count. */
79 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
80 spin_unlock(&oi->ip_lock);
81
82 status = -ENOENT;
83 goto leave;
84 }
85
86 if (mode & O_DIRECT)
87 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
88
89 oi->ip_open_count++;
90 spin_unlock(&oi->ip_lock);
91 status = 0;
92 leave:
93 mlog_exit(status);
94 return status;
95 }
96
97 static int ocfs2_file_release(struct inode *inode, struct file *file)
98 {
99 struct ocfs2_inode_info *oi = OCFS2_I(inode);
100
101 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
102 file->f_path.dentry->d_name.len,
103 file->f_path.dentry->d_name.name);
104
105 spin_lock(&oi->ip_lock);
106 if (!--oi->ip_open_count)
107 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
108 spin_unlock(&oi->ip_lock);
109
110 mlog_exit(0);
111
112 return 0;
113 }
114
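/*
 * fsync(): flush dirty pages for the inode, then force a commit of the
 * journal so the metadata changes are on disk before returning.
 */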
115 static int ocfs2_sync_file(struct file *file,
116 struct dentry *dentry,
117 int datasync)
118 {
119 int err = 0;
120 journal_t *journal;
121 struct inode *inode = dentry->d_inode;
122 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
123
124 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
125 dentry->d_name.len, dentry->d_name.name);
126
127 err = ocfs2_sync_inode(dentry->d_inode);
128 if (err)
129 goto bail;
130
131 journal = osb->journal->j_journal;
132 err = journal_force_commit(journal);
133
134 bail:
135 mlog_exit(err);
136
137 return (err < 0) ? -EIO : 0;
138 }
139
140 int ocfs2_should_update_atime(struct inode *inode,
141 struct vfsmount *vfsmnt)
142 {
143 struct timespec now;
144 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
145
146 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
147 return 0;
148
149 if ((inode->i_flags & S_NOATIME) ||
150 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
151 return 0;
152
153 /*
154 * We can be called with no vfsmnt structure - NFSD will
155 * sometimes do this.
156 *
157 * Note that our action here is different than touch_atime() -
158 * if we can't tell whether this is a noatime mount, then we
159 * don't know whether to trust the value of s_atime_quantum.
160 */
161 if (vfsmnt == NULL)
162 return 0;
163
164 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
165 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
166 return 0;
167
168 if (vfsmnt->mnt_flags & MNT_RELATIME) {
169 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
170 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
171 return 1;
172
173 return 0;
174 }
175
176 now = CURRENT_TIME;
177 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
178 return 0;
179 else
180 return 1;
181 }
182
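/*
 * Journalled atime update: stamp the in-memory atime and push it to the
 * dinode buffer the caller supplies, inside a small
 * OCFS2_INODE_UPDATE_CREDITS transaction.
 */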
183 int ocfs2_update_inode_atime(struct inode *inode,
184 struct buffer_head *bh)
185 {
186 int ret;
187 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
188 handle_t *handle;
189
190 mlog_entry_void();
191
192 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
193 if (handle == NULL) {
194 ret = -ENOMEM;
195 mlog_errno(ret);
196 goto out;
197 }
198
199 inode->i_atime = CURRENT_TIME;
200 ret = ocfs2_mark_inode_dirty(handle, inode, bh);
201 if (ret < 0)
202 mlog_errno(ret);
203
204 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
205 out:
206 mlog_exit(ret);
207 return ret;
208 }
209
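/*
 * Small helpers for pure i_size updates: set the in-memory size, block
 * count and times, then push the change to the dinode under an
 * OCFS2_INODE_UPDATE_CREDITS transaction.
 */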
210 static int ocfs2_set_inode_size(handle_t *handle,
211 struct inode *inode,
212 struct buffer_head *fe_bh,
213 u64 new_i_size)
214 {
215 int status;
216
217 mlog_entry_void();
218 i_size_write(inode, new_i_size);
219 inode->i_blocks = ocfs2_inode_sector_count(inode);
220 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
221
222 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
223 if (status < 0) {
224 mlog_errno(status);
225 goto bail;
226 }
227
228 bail:
229 mlog_exit(status);
230 return status;
231 }
232
233 static int ocfs2_simple_size_update(struct inode *inode,
234 struct buffer_head *di_bh,
235 u64 new_i_size)
236 {
237 int ret;
238 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
239 handle_t *handle = NULL;
240
241 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
242 if (handle == NULL) {
243 ret = -ENOMEM;
244 mlog_errno(ret);
245 goto out;
246 }
247
248 ret = ocfs2_set_inode_size(handle, inode, di_bh,
249 new_i_size);
250 if (ret < 0)
251 mlog_errno(ret);
252
253 ocfs2_commit_trans(osb, handle);
254 out:
255 return ret;
256 }
257
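/*
 * Write the new i_size to the dinode before the extent tree is actually
 * trimmed. The tail of the cluster which will now contain i_size is
 * zeroed here (ocfs2_zero_range_for_truncate) so a partial cluster never
 * exposes stale data after the truncate.
 */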
258 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
259 struct inode *inode,
260 struct buffer_head *fe_bh,
261 u64 new_i_size)
262 {
263 int status;
264 handle_t *handle;
265 struct ocfs2_dinode *di;
266 u64 cluster_bytes;
267
268 mlog_entry_void();
269
270 /* TODO: This needs to actually orphan the inode in this
271 * transaction. */
272
273 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
274 if (IS_ERR(handle)) {
275 status = PTR_ERR(handle);
276 mlog_errno(status);
277 goto out;
278 }
279
280 status = ocfs2_journal_access(handle, inode, fe_bh,
281 OCFS2_JOURNAL_ACCESS_WRITE);
282 if (status < 0) {
283 mlog_errno(status);
284 goto out_commit;
285 }
286
287 /*
288 * Do this before setting i_size.
289 */
290 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
291 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
292 cluster_bytes);
293 if (status) {
294 mlog_errno(status);
295 goto out_commit;
296 }
297
298 i_size_write(inode, new_i_size);
299 inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
300 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
301
302 di = (struct ocfs2_dinode *) fe_bh->b_data;
303 di->i_size = cpu_to_le64(new_i_size);
304 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
305 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
306
307 status = ocfs2_journal_dirty(handle, fe_bh);
308 if (status < 0)
309 mlog_errno(status);
310
311 out_commit:
312 ocfs2_commit_trans(osb, handle);
313 out:
314
315 mlog_exit(status);
316 return status;
317 }
318
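/*
 * Full truncate path: take ip_alloc_sem and the data cluster lock so
 * other nodes sync and drop their cached pages, unmap/truncate our own
 * pages, record the new size via ocfs2_orphan_for_truncate(), then let
 * ocfs2_prepare_truncate()/ocfs2_commit_truncate() free the clusters.
 */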
319 static int ocfs2_truncate_file(struct inode *inode,
320 struct buffer_head *di_bh,
321 u64 new_i_size)
322 {
323 int status = 0;
324 struct ocfs2_dinode *fe = NULL;
325 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
326 struct ocfs2_truncate_context *tc = NULL;
327
328 	mlog_entry("(inode = %llu, new_i_size = %llu)\n",
329 (unsigned long long)OCFS2_I(inode)->ip_blkno,
330 (unsigned long long)new_i_size);
331
332 fe = (struct ocfs2_dinode *) di_bh->b_data;
333 if (!OCFS2_IS_VALID_DINODE(fe)) {
334 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
335 status = -EIO;
336 goto bail;
337 }
338
339 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
340 "Inode %llu, inode i_size = %lld != di "
341 "i_size = %llu, i_flags = 0x%x\n",
342 (unsigned long long)OCFS2_I(inode)->ip_blkno,
343 i_size_read(inode),
344 (unsigned long long)le64_to_cpu(fe->i_size),
345 le32_to_cpu(fe->i_flags));
346
347 if (new_i_size > le64_to_cpu(fe->i_size)) {
348 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
349 (unsigned long long)le64_to_cpu(fe->i_size),
350 (unsigned long long)new_i_size);
351 status = -EINVAL;
352 mlog_errno(status);
353 goto bail;
354 }
355
356 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
357 (unsigned long long)le64_to_cpu(fe->i_blkno),
358 (unsigned long long)le64_to_cpu(fe->i_size),
359 (unsigned long long)new_i_size);
360
361 	/* let's handle the simple truncate cases before doing any more
362 * cluster locking. */
363 if (new_i_size == le64_to_cpu(fe->i_size))
364 goto bail;
365
366 down_write(&OCFS2_I(inode)->ip_alloc_sem);
367
368 /* This forces other nodes to sync and drop their pages. Do
369 * this even if we have a truncate without allocation change -
370 * ocfs2 cluster sizes can be much greater than page size, so
371 * we have to truncate them anyway. */
372 status = ocfs2_data_lock(inode, 1);
373 if (status < 0) {
374 up_write(&OCFS2_I(inode)->ip_alloc_sem);
375
376 mlog_errno(status);
377 goto bail;
378 }
379
380 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
381 truncate_inode_pages(inode->i_mapping, new_i_size);
382
383 /* alright, we're going to need to do a full blown alloc size
384 * change. Orphan the inode so that recovery can complete the
385 * truncate if necessary. This does the task of marking
386 * i_size. */
387 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
388 if (status < 0) {
389 mlog_errno(status);
390 goto bail_unlock_data;
391 }
392
393 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
394 if (status < 0) {
395 mlog_errno(status);
396 goto bail_unlock_data;
397 }
398
399 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
400 if (status < 0) {
401 mlog_errno(status);
402 goto bail_unlock_data;
403 }
404
405 /* TODO: orphan dir cleanup here. */
406 bail_unlock_data:
407 ocfs2_data_unlock(inode, 1);
408
409 up_write(&OCFS2_I(inode)->ip_alloc_sem);
410
411 bail:
412
413 mlog_exit(status);
414 return status;
415 }
416
417 /*
418 * extend allocation only here.
419 * we'll update all the disk stuff, and oip->alloc_size
420 *
421 * expect stuff to be locked, a transaction started and enough data /
422 * metadata reservations in the contexts.
423 *
424 * Will return -EAGAIN, and a reason if a restart is needed.
425 * If passed in, *reason will always be set, even in error.
426 */
427 int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
428 struct inode *inode,
429 u32 *logical_offset,
430 u32 clusters_to_add,
431 int mark_unwritten,
432 struct buffer_head *fe_bh,
433 handle_t *handle,
434 struct ocfs2_alloc_context *data_ac,
435 struct ocfs2_alloc_context *meta_ac,
436 enum ocfs2_alloc_restarted *reason_ret)
437 {
438 int status = 0;
439 int free_extents;
440 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
441 enum ocfs2_alloc_restarted reason = RESTART_NONE;
442 u32 bit_off, num_bits;
443 u64 block;
444 u8 flags = 0;
445
446 BUG_ON(!clusters_to_add);
447
448 if (mark_unwritten)
449 flags = OCFS2_EXT_UNWRITTEN;
450
451 free_extents = ocfs2_num_free_extents(osb, inode, fe);
452 if (free_extents < 0) {
453 status = free_extents;
454 mlog_errno(status);
455 goto leave;
456 }
457
458 /* there are two cases which could cause us to EAGAIN in the
459 * we-need-more-metadata case:
460 * 1) we haven't reserved *any*
461 * 2) we are so fragmented, we've needed to add metadata too
462 * many times. */
463 if (!free_extents && !meta_ac) {
464 mlog(0, "we haven't reserved any metadata!\n");
465 status = -EAGAIN;
466 reason = RESTART_META;
467 goto leave;
468 } else if ((!free_extents)
469 && (ocfs2_alloc_context_bits_left(meta_ac)
470 < ocfs2_extend_meta_needed(fe))) {
471 mlog(0, "filesystem is really fragmented...\n");
472 status = -EAGAIN;
473 reason = RESTART_META;
474 goto leave;
475 }
476
477 status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
478 &bit_off, &num_bits);
479 if (status < 0) {
480 if (status != -ENOSPC)
481 mlog_errno(status);
482 goto leave;
483 }
484
485 BUG_ON(num_bits > clusters_to_add);
486
487 /* reserve our write early -- insert_extent may update the inode */
488 status = ocfs2_journal_access(handle, inode, fe_bh,
489 OCFS2_JOURNAL_ACCESS_WRITE);
490 if (status < 0) {
491 mlog_errno(status);
492 goto leave;
493 }
494
495 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
496 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
497 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
498 status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
499 *logical_offset, block, num_bits,
500 flags, meta_ac);
501 if (status < 0) {
502 mlog_errno(status);
503 goto leave;
504 }
505
506 status = ocfs2_journal_dirty(handle, fe_bh);
507 if (status < 0) {
508 mlog_errno(status);
509 goto leave;
510 }
511
512 clusters_to_add -= num_bits;
513 *logical_offset += num_bits;
514
515 if (clusters_to_add) {
516 mlog(0, "need to alloc once more, clusters = %u, wanted = "
517 "%u\n", fe->i_clusters, clusters_to_add);
518 status = -EAGAIN;
519 reason = RESTART_TRANS;
520 }
521
522 leave:
523 mlog_exit(status);
524 if (reason_ret)
525 *reason_ret = reason;
526 return status;
527 }
528
529 /*
530 * For a given allocation, determine which allocators will need to be
531 * accessed, and lock them, reserving the appropriate number of bits.
532 *
533 * Sparse file systems call this from ocfs2_write_begin_nolock()
534 * and ocfs2_allocate_unwritten_extents().
535 *
536 * File systems which don't support holes call this from
537 * ocfs2_extend_allocation().
538 */
539 int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
540 u32 clusters_to_add, u32 extents_to_split,
541 struct ocfs2_alloc_context **data_ac,
542 struct ocfs2_alloc_context **meta_ac)
543 {
544 int ret, num_free_extents;
545 unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
546 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
547
548 *meta_ac = NULL;
549 *data_ac = NULL;
550
551 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
552 "clusters_to_add = %u, extents_to_split = %u\n",
553 (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
554 le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);
555
556 num_free_extents = ocfs2_num_free_extents(osb, inode, di);
557 if (num_free_extents < 0) {
558 ret = num_free_extents;
559 mlog_errno(ret);
560 goto out;
561 }
562
563 /*
564 * Sparse allocation file systems need to be more conservative
565 * with reserving room for expansion - the actual allocation
566 * happens while we've got a journal handle open so re-taking
567 * a cluster lock (because we ran out of room for another
568 * extent) will violate ordering rules.
569 *
570 * Most of the time we'll only be seeing this 1 cluster at a time
571 * anyway.
572 *
573 * Always lock for any unwritten extents - we might want to
574 * add blocks during a split.
575 */
576 if (!num_free_extents ||
577 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
578 ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
579 if (ret < 0) {
580 if (ret != -ENOSPC)
581 mlog_errno(ret);
582 goto out;
583 }
584 }
585
586 ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
587 if (ret < 0) {
588 if (ret != -ENOSPC)
589 mlog_errno(ret);
590 goto out;
591 }
592
593 out:
594 if (ret) {
595 if (*meta_ac) {
596 ocfs2_free_alloc_context(*meta_ac);
597 *meta_ac = NULL;
598 }
599
600 /*
601 * We cannot have an error and a non null *data_ac.
602 */
603 }
604
605 return ret;
606 }
607
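/*
 * Extend the allocation tree by clusters_to_add starting at
 * logical_start. The work runs in a loop: a RESTART_TRANS result means
 * extend the journal handle and retry, while RESTART_META means drop
 * everything and restart from the top with fresh reservations.
 */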
608 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
609 u32 clusters_to_add, int mark_unwritten)
610 {
611 int status = 0;
612 int restart_func = 0;
613 int credits;
614 u32 prev_clusters;
615 struct buffer_head *bh = NULL;
616 struct ocfs2_dinode *fe = NULL;
617 handle_t *handle = NULL;
618 struct ocfs2_alloc_context *data_ac = NULL;
619 struct ocfs2_alloc_context *meta_ac = NULL;
620 enum ocfs2_alloc_restarted why;
621 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
622
623 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
624
625 /*
626 * This function only exists for file systems which don't
627 * support holes.
628 */
629 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
630
631 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
632 OCFS2_BH_CACHED, inode);
633 if (status < 0) {
634 mlog_errno(status);
635 goto leave;
636 }
637
638 fe = (struct ocfs2_dinode *) bh->b_data;
639 if (!OCFS2_IS_VALID_DINODE(fe)) {
640 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
641 status = -EIO;
642 goto leave;
643 }
644
645 restart_all:
646 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
647
648 status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
649 &meta_ac);
650 if (status) {
651 mlog_errno(status);
652 goto leave;
653 }
654
655 credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
656 handle = ocfs2_start_trans(osb, credits);
657 if (IS_ERR(handle)) {
658 status = PTR_ERR(handle);
659 handle = NULL;
660 mlog_errno(status);
661 goto leave;
662 }
663
664 restarted_transaction:
665 	/* reserve a write to the file entry early on - that way if we
666 * run out of credits in the allocation path, we can still
667 * update i_size. */
668 status = ocfs2_journal_access(handle, inode, bh,
669 OCFS2_JOURNAL_ACCESS_WRITE);
670 if (status < 0) {
671 mlog_errno(status);
672 goto leave;
673 }
674
675 prev_clusters = OCFS2_I(inode)->ip_clusters;
676
677 status = ocfs2_do_extend_allocation(osb,
678 inode,
679 &logical_start,
680 clusters_to_add,
681 mark_unwritten,
682 bh,
683 handle,
684 data_ac,
685 meta_ac,
686 &why);
687 if ((status < 0) && (status != -EAGAIN)) {
688 if (status != -ENOSPC)
689 mlog_errno(status);
690 goto leave;
691 }
692
693 status = ocfs2_journal_dirty(handle, bh);
694 if (status < 0) {
695 mlog_errno(status);
696 goto leave;
697 }
698
699 spin_lock(&OCFS2_I(inode)->ip_lock);
700 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
701 spin_unlock(&OCFS2_I(inode)->ip_lock);
702
703 if (why != RESTART_NONE && clusters_to_add) {
704 if (why == RESTART_META) {
705 mlog(0, "restarting function.\n");
706 restart_func = 1;
707 } else {
708 BUG_ON(why != RESTART_TRANS);
709
710 mlog(0, "restarting transaction.\n");
711 /* TODO: This can be more intelligent. */
712 credits = ocfs2_calc_extend_credits(osb->sb,
713 fe,
714 clusters_to_add);
715 status = ocfs2_extend_trans(handle, credits);
716 if (status < 0) {
717 /* handle still has to be committed at
718 * this point. */
719 status = -ENOMEM;
720 mlog_errno(status);
721 goto leave;
722 }
723 goto restarted_transaction;
724 }
725 }
726
727 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
728 le32_to_cpu(fe->i_clusters),
729 (unsigned long long)le64_to_cpu(fe->i_size));
730 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
731 OCFS2_I(inode)->ip_clusters, i_size_read(inode));
732
733 leave:
734 if (handle) {
735 ocfs2_commit_trans(osb, handle);
736 handle = NULL;
737 }
738 if (data_ac) {
739 ocfs2_free_alloc_context(data_ac);
740 data_ac = NULL;
741 }
742 if (meta_ac) {
743 ocfs2_free_alloc_context(meta_ac);
744 meta_ac = NULL;
745 }
746 if ((!status) && restart_func) {
747 restart_func = 0;
748 goto restart_all;
749 }
750 if (bh) {
751 brelse(bh);
752 bh = NULL;
753 }
754
755 mlog_exit(status);
756 return status;
757 }
758
759 static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
760 u32 clusters_to_add, int mark_unwritten)
761 {
762 int ret;
763
764 /*
765 	 * The alloc sem blocks people in read/write from reading our
766 * allocation until we're done changing it. We depend on
767 * i_mutex to block other extend/truncate calls while we're
768 * here.
769 */
770 down_write(&OCFS2_I(inode)->ip_alloc_sem);
771 ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
772 mark_unwritten);
773 up_write(&OCFS2_I(inode)->ip_alloc_sem);
774
775 return ret;
776 }
777
778 /* Some parts of this taken from generic_cont_expand, which turned out
779 * to be too fragile to do exactly what we need without us having to
780 * worry about recursive locking in ->prepare_write() and
781 * ->commit_write(). */
782 static int ocfs2_write_zero_page(struct inode *inode,
783 u64 size)
784 {
785 struct address_space *mapping = inode->i_mapping;
786 struct page *page;
787 unsigned long index;
788 unsigned int offset;
789 handle_t *handle = NULL;
790 int ret;
791
792 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
793 /* ugh. in prepare/commit_write, if from==to==start of block, we
794 ** skip the prepare. make sure we never send an offset for the start
795 ** of a block
796 */
797 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
798 offset++;
799 }
800 index = size >> PAGE_CACHE_SHIFT;
801
802 page = grab_cache_page(mapping, index);
803 if (!page) {
804 ret = -ENOMEM;
805 mlog_errno(ret);
806 goto out;
807 }
808
809 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
810 if (ret < 0) {
811 mlog_errno(ret);
812 goto out_unlock;
813 }
814
815 if (ocfs2_should_order_data(inode)) {
816 handle = ocfs2_start_walk_page_trans(inode, page, offset,
817 offset);
818 if (IS_ERR(handle)) {
819 ret = PTR_ERR(handle);
820 handle = NULL;
821 goto out_unlock;
822 }
823 }
824
825 /* must not update i_size! */
826 ret = block_commit_write(page, offset, offset);
827 if (ret < 0)
828 mlog_errno(ret);
829 else
830 ret = 0;
831
832 if (handle)
833 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
834 out_unlock:
835 unlock_page(page);
836 page_cache_release(page);
837 out:
838 return ret;
839 }
840
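/*
 * Zero the region between the current i_size and zero_to_size one block
 * at a time, rescheduling between blocks so very large extends don't hog
 * the cpu.
 */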
841 static int ocfs2_zero_extend(struct inode *inode,
842 u64 zero_to_size)
843 {
844 int ret = 0;
845 u64 start_off;
846 struct super_block *sb = inode->i_sb;
847
848 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
849 while (start_off < zero_to_size) {
850 ret = ocfs2_write_zero_page(inode, start_off);
851 if (ret < 0) {
852 mlog_errno(ret);
853 goto out;
854 }
855
856 start_off += sb->s_blocksize;
857
858 /*
859 * Very large extends have the potential to lock up
860 * the cpu for extended periods of time.
861 */
862 cond_resched();
863 }
864
865 out:
866 return ret;
867 }
868
869 /*
870 * A tail_to_skip value > 0 indicates that we're being called from
871 * ocfs2_file_aio_write(). This has the following implications:
872 *
873 * - we don't want to update i_size
874 * - di_bh will be NULL, which is fine because it's only used in the
875 * case where we want to update i_size.
876 * - ocfs2_zero_extend() will then only be filling the hole created
877 * between i_size and the start of the write.
878 */
879 static int ocfs2_extend_file(struct inode *inode,
880 struct buffer_head *di_bh,
881 u64 new_i_size,
882 size_t tail_to_skip)
883 {
884 int ret = 0;
885 u32 clusters_to_add = 0;
886
887 BUG_ON(!tail_to_skip && !di_bh);
888
889 /* setattr sometimes calls us like this. */
890 if (new_i_size == 0)
891 goto out;
892
893 if (i_size_read(inode) == new_i_size)
894 goto out;
895 BUG_ON(new_i_size < i_size_read(inode));
896
897 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
898 BUG_ON(tail_to_skip != 0);
899 goto out_update_size;
900 }
901
902 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
903 OCFS2_I(inode)->ip_clusters;
904
905 /*
906 * protect the pages that ocfs2_zero_extend is going to be
907 	 * pulling into the page cache. We do this before the
908 * metadata extend so that we don't get into the situation
909 * where we've extended the metadata but can't get the data
910 * lock to zero.
911 */
912 ret = ocfs2_data_lock(inode, 1);
913 if (ret < 0) {
914 mlog_errno(ret);
915 goto out;
916 }
917
918 if (clusters_to_add) {
919 ret = ocfs2_extend_allocation(inode,
920 OCFS2_I(inode)->ip_clusters,
921 clusters_to_add, 0);
922 if (ret < 0) {
923 mlog_errno(ret);
924 goto out_unlock;
925 }
926 }
927
928 /*
929 * Call this even if we don't add any clusters to the tree. We
930 * still need to zero the area between the old i_size and the
931 * new i_size.
932 */
933 ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
934 if (ret < 0) {
935 mlog_errno(ret);
936 goto out_unlock;
937 }
938
939 out_update_size:
940 if (!tail_to_skip) {
941 /* We're being called from ocfs2_setattr() which wants
942 * us to update i_size */
943 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
944 if (ret < 0)
945 mlog_errno(ret);
946 }
947
948 out_unlock:
949 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
950 ocfs2_data_unlock(inode, 1);
951
952 out:
953 return ret;
954 }
955
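/*
 * setattr: size changes take the rw lock first, then the meta lock.
 * Truncate vs. extend is decided against the current i_size before the
 * generic attribute update and dinode write-out below.
 */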
956 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
957 {
958 int status = 0, size_change;
959 struct inode *inode = dentry->d_inode;
960 struct super_block *sb = inode->i_sb;
961 struct ocfs2_super *osb = OCFS2_SB(sb);
962 struct buffer_head *bh = NULL;
963 handle_t *handle = NULL;
964
965 mlog_entry("(0x%p, '%.*s')\n", dentry,
966 dentry->d_name.len, dentry->d_name.name);
967
968 if (attr->ia_valid & ATTR_MODE)
969 mlog(0, "mode change: %d\n", attr->ia_mode);
970 if (attr->ia_valid & ATTR_UID)
971 mlog(0, "uid change: %d\n", attr->ia_uid);
972 if (attr->ia_valid & ATTR_GID)
973 mlog(0, "gid change: %d\n", attr->ia_gid);
974 if (attr->ia_valid & ATTR_SIZE)
975 mlog(0, "size change...\n");
976 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
977 mlog(0, "time change...\n");
978
979 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
980 | ATTR_GID | ATTR_UID | ATTR_MODE)
981 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
982 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
983 return 0;
984 }
985
986 status = inode_change_ok(inode, attr);
987 if (status)
988 return status;
989
990 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
991 if (size_change) {
992 status = ocfs2_rw_lock(inode, 1);
993 if (status < 0) {
994 mlog_errno(status);
995 goto bail;
996 }
997 }
998
999 status = ocfs2_meta_lock(inode, &bh, 1);
1000 if (status < 0) {
1001 if (status != -ENOENT)
1002 mlog_errno(status);
1003 goto bail_unlock_rw;
1004 }
1005
1006 if (size_change && attr->ia_size != i_size_read(inode)) {
1007 if (i_size_read(inode) > attr->ia_size)
1008 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1009 else
1010 status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
1011 if (status < 0) {
1012 if (status != -ENOSPC)
1013 mlog_errno(status);
1014 status = -ENOSPC;
1015 goto bail_unlock;
1016 }
1017 }
1018
1019 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1020 if (IS_ERR(handle)) {
1021 status = PTR_ERR(handle);
1022 mlog_errno(status);
1023 goto bail_unlock;
1024 }
1025
1026 /*
1027 * This will intentionally not wind up calling vmtruncate(),
1028 * since all the work for a size change has been done above.
1029 * Otherwise, we could get into problems with truncate as
1030 * ip_alloc_sem is used there to protect against i_size
1031 * changes.
1032 */
1033 status = inode_setattr(inode, attr);
1034 if (status < 0) {
1035 mlog_errno(status);
1036 goto bail_commit;
1037 }
1038
1039 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1040 if (status < 0)
1041 mlog_errno(status);
1042
1043 bail_commit:
1044 ocfs2_commit_trans(osb, handle);
1045 bail_unlock:
1046 ocfs2_meta_unlock(inode, 1);
1047 bail_unlock_rw:
1048 if (size_change)
1049 ocfs2_rw_unlock(inode, 1);
1050 bail:
1051 if (bh)
1052 brelse(bh);
1053
1054 mlog_exit(status);
1055 return status;
1056 }
1057
1058 int ocfs2_getattr(struct vfsmount *mnt,
1059 struct dentry *dentry,
1060 struct kstat *stat)
1061 {
1062 struct inode *inode = dentry->d_inode;
1063 struct super_block *sb = dentry->d_inode->i_sb;
1064 struct ocfs2_super *osb = sb->s_fs_info;
1065 int err;
1066
1067 mlog_entry_void();
1068
1069 err = ocfs2_inode_revalidate(dentry);
1070 if (err) {
1071 if (err != -ENOENT)
1072 mlog_errno(err);
1073 goto bail;
1074 }
1075
1076 generic_fillattr(inode, stat);
1077
1078 /* We set the blksize from the cluster size for performance */
1079 stat->blksize = osb->s_clustersize;
1080
1081 bail:
1082 mlog_exit(err);
1083
1084 return err;
1085 }
1086
1087 int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
1088 {
1089 int ret;
1090
1091 mlog_entry_void();
1092
1093 ret = ocfs2_meta_lock(inode, NULL, 0);
1094 if (ret) {
1095 if (ret != -ENOENT)
1096 mlog_errno(ret);
1097 goto out;
1098 }
1099
1100 ret = generic_permission(inode, mask, NULL);
1101
1102 ocfs2_meta_unlock(inode, 0);
1103 out:
1104 mlog_exit(ret);
1105 return ret;
1106 }
1107
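/*
 * Cluster-aware suid/sgid clearing for writes: done here, under our own
 * transaction, rather than via remove_suid() so we don't recurse into
 * ocfs2_setattr() while already holding cluster locks.
 */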
1108 static int ocfs2_write_remove_suid(struct inode *inode)
1109 {
1110 int ret;
1111 struct buffer_head *bh = NULL;
1112 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1113 handle_t *handle;
1114 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1115 struct ocfs2_dinode *di;
1116
1117 mlog_entry("(Inode %llu, mode 0%o)\n",
1118 (unsigned long long)oi->ip_blkno, inode->i_mode);
1119
1120 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1121 if (handle == NULL) {
1122 ret = -ENOMEM;
1123 mlog_errno(ret);
1124 goto out;
1125 }
1126
1127 ret = ocfs2_read_block(osb, oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
1128 if (ret < 0) {
1129 mlog_errno(ret);
1130 goto out_trans;
1131 }
1132
1133 ret = ocfs2_journal_access(handle, inode, bh,
1134 OCFS2_JOURNAL_ACCESS_WRITE);
1135 if (ret < 0) {
1136 mlog_errno(ret);
1137 goto out_bh;
1138 }
1139
1140 inode->i_mode &= ~S_ISUID;
1141 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1142 inode->i_mode &= ~S_ISGID;
1143
1144 di = (struct ocfs2_dinode *) bh->b_data;
1145 di->i_mode = cpu_to_le16(inode->i_mode);
1146
1147 ret = ocfs2_journal_dirty(handle, bh);
1148 if (ret < 0)
1149 mlog_errno(ret);
1150 out_bh:
1151 brelse(bh);
1152 out_trans:
1153 ocfs2_commit_trans(osb, handle);
1154 out:
1155 mlog_exit(ret);
1156 return ret;
1157 }
1158
1159 /*
1160 * Will look for holes and unwritten extents in the range starting at
1161 * pos for count bytes (inclusive).
1162 */
1163 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1164 size_t count)
1165 {
1166 int ret = 0;
1167 unsigned int extent_flags;
1168 u32 cpos, clusters, extent_len, phys_cpos;
1169 struct super_block *sb = inode->i_sb;
1170
1171 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1172 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1173
1174 while (clusters) {
1175 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1176 &extent_flags);
1177 if (ret < 0) {
1178 mlog_errno(ret);
1179 goto out;
1180 }
1181
1182 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1183 ret = 1;
1184 break;
1185 }
1186
1187 if (extent_len > clusters)
1188 extent_len = clusters;
1189
1190 clusters -= extent_len;
1191 cpos += extent_len;
1192 }
1193 out:
1194 return ret;
1195 }
1196
1197 /*
1198 * Allocate enough extents to cover the region starting at byte offset
1199 * start for len bytes. Existing extents are skipped, any extents
1200 * added are marked as "unwritten".
1201 */
1202 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1203 u64 start, u64 len)
1204 {
1205 int ret;
1206 u32 cpos, phys_cpos, clusters, alloc_size;
1207
1208 /*
1209 * We consider both start and len to be inclusive.
1210 */
1211 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1212 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1213 clusters -= cpos;
1214
1215 while (clusters) {
1216 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1217 &alloc_size, NULL);
1218 if (ret) {
1219 mlog_errno(ret);
1220 goto out;
1221 }
1222
1223 /*
1224 * Hole or existing extent len can be arbitrary, so
1225 * cap it to our own allocation request.
1226 */
1227 if (alloc_size > clusters)
1228 alloc_size = clusters;
1229
1230 if (phys_cpos) {
1231 /*
1232 * We already have an allocation at this
1233 * region so we can safely skip it.
1234 */
1235 goto next;
1236 }
1237
1238 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1239 if (ret) {
1240 if (ret != -ENOSPC)
1241 mlog_errno(ret);
1242 goto out;
1243 }
1244
1245 next:
1246 cpos += alloc_size;
1247 clusters -= alloc_size;
1248 }
1249
1250 ret = 0;
1251 out:
1252 return ret;
1253 }
1254
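/*
 * Per-write preparation: take the meta lock (upgrading to a write level
 * lock when clearing suid bits or extending), resolve the final write
 * offset for O_APPEND, and decide whether an O_DIRECT write must fall
 * back to buffered io (*direct_io is cleared for writes past i_size or
 * over holes/unwritten extents).
 */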
1255 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1256 loff_t *ppos,
1257 size_t count,
1258 int appending,
1259 int *direct_io)
1260 {
1261 int ret = 0, meta_level = appending;
1262 struct inode *inode = dentry->d_inode;
1263 u32 clusters;
1264 loff_t newsize, saved_pos;
1265
1266 /*
1267 * We sample i_size under a read level meta lock to see if our write
1268 * is extending the file, if it is we back off and get a write level
1269 * meta lock.
1270 */
1271 for(;;) {
1272 ret = ocfs2_meta_lock(inode, NULL, meta_level);
1273 if (ret < 0) {
1274 meta_level = -1;
1275 mlog_errno(ret);
1276 goto out;
1277 }
1278
1279 /* Clear suid / sgid if necessary. We do this here
1280 * instead of later in the write path because
1281 * remove_suid() calls ->setattr without any hint that
1282 * we may have already done our cluster locking. Since
1283 * ocfs2_setattr() *must* take cluster locks to
1284 * proceeed, this will lead us to recursively lock the
1285 * inode. There's also the dinode i_size state which
1286 * can be lost via setattr during extending writes (we
1287 	 * set inode->i_size at the end of a write). */
1288 if (should_remove_suid(dentry)) {
1289 if (meta_level == 0) {
1290 ocfs2_meta_unlock(inode, meta_level);
1291 meta_level = 1;
1292 continue;
1293 }
1294
1295 ret = ocfs2_write_remove_suid(inode);
1296 if (ret < 0) {
1297 mlog_errno(ret);
1298 goto out_unlock;
1299 }
1300 }
1301
1302 /* work on a copy of ppos until we're sure that we won't have
1303 * to recalculate it due to relocking. */
1304 if (appending) {
1305 saved_pos = i_size_read(inode);
1306 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1307 } else {
1308 saved_pos = *ppos;
1309 }
1310
1311 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
1312 loff_t end = saved_pos + count;
1313
1314 /*
1315 * Skip the O_DIRECT checks if we don't need
1316 * them.
1317 */
1318 if (!direct_io || !(*direct_io))
1319 break;
1320
1321 /*
1322 * Allowing concurrent direct writes means
1323 * i_size changes wouldn't be synchronized, so
1324 * one node could wind up truncating another
1325 	 * node's writes.
1326 */
1327 if (end > i_size_read(inode)) {
1328 *direct_io = 0;
1329 break;
1330 }
1331
1332 /*
1333 * We don't fill holes during direct io, so
1334 * check for them here. If any are found, the
1335 * caller will have to retake some cluster
1336 * locks and initiate the io as buffered.
1337 */
1338 ret = ocfs2_check_range_for_holes(inode, saved_pos,
1339 count);
1340 if (ret == 1) {
1341 *direct_io = 0;
1342 ret = 0;
1343 } else if (ret < 0)
1344 mlog_errno(ret);
1345 break;
1346 }
1347
1348 /*
1349 * The rest of this loop is concerned with legacy file
1350 * systems which don't support sparse files.
1351 */
1352
1353 newsize = count + saved_pos;
1354
1355 mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
1356 (long long) saved_pos, (long long) newsize,
1357 (long long) i_size_read(inode));
1358
1359 /* No need for a higher level metadata lock if we're
1360 * never going past i_size. */
1361 if (newsize <= i_size_read(inode))
1362 break;
1363
1364 if (meta_level == 0) {
1365 ocfs2_meta_unlock(inode, meta_level);
1366 meta_level = 1;
1367 continue;
1368 }
1369
1370 spin_lock(&OCFS2_I(inode)->ip_lock);
1371 clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
1372 OCFS2_I(inode)->ip_clusters;
1373 spin_unlock(&OCFS2_I(inode)->ip_lock);
1374
1375 mlog(0, "Writing at EOF, may need more allocation: "
1376 "i_size = %lld, newsize = %lld, need %u clusters\n",
1377 (long long) i_size_read(inode), (long long) newsize,
1378 clusters);
1379
1380 /* We only want to continue the rest of this loop if
1381 * our extend will actually require more
1382 * allocation. */
1383 if (!clusters)
1384 break;
1385
1386 ret = ocfs2_extend_file(inode, NULL, newsize, count);
1387 if (ret < 0) {
1388 if (ret != -ENOSPC)
1389 mlog_errno(ret);
1390 goto out_unlock;
1391 }
1392 break;
1393 }
1394
1395 if (appending)
1396 *ppos = saved_pos;
1397
1398 out_unlock:
1399 ocfs2_meta_unlock(inode, meta_level);
1400
1401 out:
1402 return ret;
1403 }
1404
1405 static inline void
1406 ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
1407 {
1408 const struct iovec *iov = *iovp;
1409 size_t base = *basep;
1410
1411 do {
1412 int copy = min(bytes, iov->iov_len - base);
1413
1414 bytes -= copy;
1415 base += copy;
1416 if (iov->iov_len == base) {
1417 iov++;
1418 base = 0;
1419 }
1420 } while (bytes);
1421 *iovp = iov;
1422 *basep = base;
1423 }
1424
1425 static struct page * ocfs2_get_write_source(char **ret_src_buf,
1426 const struct iovec *cur_iov,
1427 size_t iov_offset)
1428 {
1429 int ret;
1430 char *buf = cur_iov->iov_base + iov_offset;
1431 struct page *src_page = NULL;
1432 unsigned long off;
1433
1434 off = (unsigned long)(buf) & ~PAGE_CACHE_MASK;
1435
1436 if (!segment_eq(get_fs(), KERNEL_DS)) {
1437 /*
1438 * Pull in the user page. We want to do this outside
1439 * of the meta data locks in order to preserve locking
1440 * order in case of page fault.
1441 */
1442 ret = get_user_pages(current, current->mm,
1443 (unsigned long)buf & PAGE_CACHE_MASK, 1,
1444 0, 0, &src_page, NULL);
1445 if (ret == 1)
1446 *ret_src_buf = kmap(src_page) + off;
1447 else
1448 src_page = ERR_PTR(-EFAULT);
1449 } else {
1450 *ret_src_buf = buf;
1451 }
1452
1453 return src_page;
1454 }
1455
1456 static void ocfs2_put_write_source(struct page *page)
1457 {
1458 if (page) {
1459 kunmap(page);
1460 page_cache_release(page);
1461 }
1462 }
1463
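/*
 * Buffered write loop: map in the source iovec page, then use
 * ocfs2_write_begin()/ocfs2_write_end() to copy at most a page (and at
 * most one iovec segment) per iteration until count is exhausted.
 */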
1464 static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
1465 const struct iovec *iov,
1466 unsigned long nr_segs,
1467 size_t count,
1468 ssize_t o_direct_written)
1469 {
1470 int ret = 0;
1471 ssize_t copied, total = 0;
1472 size_t iov_offset = 0, bytes;
1473 loff_t pos;
1474 const struct iovec *cur_iov = iov;
1475 struct page *user_page, *page;
1476 char *buf, *dst;
1477 void *fsdata;
1478
1479 /*
1480 * handle partial DIO write. Adjust cur_iov if needed.
1481 */
1482 ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);
1483
1484 do {
1485 pos = *ppos;
1486
1487 user_page = ocfs2_get_write_source(&buf, cur_iov, iov_offset);
1488 if (IS_ERR(user_page)) {
1489 ret = PTR_ERR(user_page);
1490 goto out;
1491 }
1492
1493 /* Stay within our page boundaries */
1494 bytes = min((PAGE_CACHE_SIZE - ((unsigned long)pos & ~PAGE_CACHE_MASK)),
1495 (PAGE_CACHE_SIZE - ((unsigned long)buf & ~PAGE_CACHE_MASK)));
1496 /* Stay within the vector boundary */
1497 bytes = min_t(size_t, bytes, cur_iov->iov_len - iov_offset);
1498 /* Stay within count */
1499 bytes = min(bytes, count);
1500
1501 page = NULL;
1502 ret = ocfs2_write_begin(file, file->f_mapping, pos, bytes, 0,
1503 &page, &fsdata);
1504 if (ret) {
1505 mlog_errno(ret);
1506 goto out;
1507 }
1508
1509 dst = kmap_atomic(page, KM_USER0);
1510 memcpy(dst + (pos & (PAGE_CACHE_SIZE - 1)), buf, bytes);
1511 kunmap_atomic(dst, KM_USER0);
1512 flush_dcache_page(page);
1513 ocfs2_put_write_source(user_page);
1514
1515 copied = ocfs2_write_end(file, file->f_mapping, pos, bytes,
1516 bytes, page, fsdata);
1517 if (copied < 0) {
1518 mlog_errno(copied);
1519 ret = copied;
1520 goto out;
1521 }
1522
1523 total += copied;
1524 *ppos = pos + copied;
1525 count -= copied;
1526
1527 ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);
1528 } while(count);
1529
1530 out:
1531 return total ? total : ret;
1532 }
1533
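/*
 * Write entry point. Lock ordering is i_mutex -> i_alloc_sem (O_DIRECT
 * only) -> rw cluster lock; if the O_DIRECT checks in
 * ocfs2_prepare_inode_for_write() fail, we drop the locks and relock for
 * a buffered, synced write instead.
 */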
1534 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1535 const struct iovec *iov,
1536 unsigned long nr_segs,
1537 loff_t pos)
1538 {
1539 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1540 int can_do_direct, sync = 0;
1541 ssize_t written = 0;
1542 size_t ocount; /* original count */
1543 size_t count; /* after file limit checks */
1544 loff_t *ppos = &iocb->ki_pos;
1545 struct file *file = iocb->ki_filp;
1546 struct inode *inode = file->f_path.dentry->d_inode;
1547
1548 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1549 (unsigned int)nr_segs,
1550 file->f_path.dentry->d_name.len,
1551 file->f_path.dentry->d_name.name);
1552
1553 if (iocb->ki_left == 0)
1554 return 0;
1555
1556 ret = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1557 if (ret)
1558 return ret;
1559
1560 count = ocount;
1561
1562 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1563
1564 appending = file->f_flags & O_APPEND ? 1 : 0;
1565 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1566
1567 mutex_lock(&inode->i_mutex);
1568
1569 relock:
1570 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1571 if (direct_io) {
1572 down_read(&inode->i_alloc_sem);
1573 have_alloc_sem = 1;
1574 }
1575
1576 /* concurrent O_DIRECT writes are allowed */
1577 rw_level = !direct_io;
1578 ret = ocfs2_rw_lock(inode, rw_level);
1579 if (ret < 0) {
1580 mlog_errno(ret);
1581 goto out_sems;
1582 }
1583
1584 can_do_direct = direct_io;
1585 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1586 iocb->ki_left, appending,
1587 &can_do_direct);
1588 if (ret < 0) {
1589 mlog_errno(ret);
1590 goto out;
1591 }
1592
1593 /*
1594 * We can't complete the direct I/O as requested, fall back to
1595 * buffered I/O.
1596 */
1597 if (direct_io && !can_do_direct) {
1598 ocfs2_rw_unlock(inode, rw_level);
1599 up_read(&inode->i_alloc_sem);
1600
1601 have_alloc_sem = 0;
1602 rw_level = -1;
1603
1604 direct_io = 0;
1605 sync = 1;
1606 goto relock;
1607 }
1608
1609 if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
1610 sync = 1;
1611
1612 /*
1613 * XXX: Is it ok to execute these checks a second time?
1614 */
1615 ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
1616 if (ret)
1617 goto out;
1618
1619 /*
1620 * Set pos so that sync_page_range_nolock() below understands
1621 * where to start from. We might've moved it around via the
1622 * calls above. The range we want to actually sync starts from
1623 * *ppos here.
1624 *
1625 */
1626 pos = *ppos;
1627
1628 /* communicate with ocfs2_dio_end_io */
1629 ocfs2_iocb_set_rw_locked(iocb, rw_level);
1630
1631 if (direct_io) {
1632 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
1633 ppos, count, ocount);
1634 if (written < 0) {
1635 ret = written;
1636 goto out_dio;
1637 }
1638 } else {
1639 written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
1640 count, written);
1641 if (written < 0) {
1642 ret = written;
1643 			if (ret != -EFAULT && ret != -ENOSPC)
1644 mlog_errno(ret);
1645 goto out;
1646 }
1647 }
1648
1649 out_dio:
1650 /* buffered aio wouldn't have proper lock coverage today */
1651 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
1652
1653 /*
1654 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
1655 * function pointer which is called when o_direct io completes so that
1656 * it can unlock our rw lock. (it's the clustered equivalent of
1657 * i_alloc_sem; protects truncate from racing with pending ios).
1658 * Unfortunately there are error cases which call end_io and others
1659 * that don't. so we don't have to unlock the rw_lock if either an
1660 * async dio is going to do it in the future or an end_io after an
1661 * error has already done it.
1662 */
1663 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
1664 rw_level = -1;
1665 have_alloc_sem = 0;
1666 }
1667
1668 out:
1669 if (rw_level != -1)
1670 ocfs2_rw_unlock(inode, rw_level);
1671
1672 out_sems:
1673 if (have_alloc_sem)
1674 up_read(&inode->i_alloc_sem);
1675
1676 if (written > 0 && sync) {
1677 ssize_t err;
1678
1679 err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
1680 if (err < 0)
1681 written = err;
1682 }
1683
1684 mutex_unlock(&inode->i_mutex);
1685
1686 mlog_exit(ret);
1687 return written ? written : ret;
1688 }
1689
1690 static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
1691 struct pipe_buffer *buf,
1692 struct splice_desc *sd)
1693 {
1694 int ret, count;
1695 ssize_t copied = 0;
1696 struct file *file = sd->u.file;
1697 unsigned int offset;
1698 struct page *page = NULL;
1699 void *fsdata;
1700 char *src, *dst;
1701
1702 ret = buf->ops->confirm(pipe, buf);
1703 if (ret)
1704 goto out;
1705
1706 offset = sd->pos & ~PAGE_CACHE_MASK;
1707 count = sd->len;
1708 if (count + offset > PAGE_CACHE_SIZE)
1709 count = PAGE_CACHE_SIZE - offset;
1710
1711 ret = ocfs2_write_begin(file, file->f_mapping, sd->pos, count, 0,
1712 &page, &fsdata);
1713 if (ret) {
1714 mlog_errno(ret);
1715 goto out;
1716 }
1717
1718 src = buf->ops->map(pipe, buf, 1);
1719 dst = kmap_atomic(page, KM_USER1);
1720 memcpy(dst + offset, src + buf->offset, count);
1721 	kunmap_atomic(dst, KM_USER1);
1722 buf->ops->unmap(pipe, buf, src);
1723
1724 copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
1725 page, fsdata);
1726 if (copied < 0) {
1727 mlog_errno(copied);
1728 ret = copied;
1729 goto out;
1730 }
1731 out:
1732
1733 return copied ? copied : ret;
1734 }
1735
1736 static ssize_t __ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1737 struct file *out,
1738 loff_t *ppos,
1739 size_t len,
1740 unsigned int flags)
1741 {
1742 int ret, err;
1743 struct address_space *mapping = out->f_mapping;
1744 struct inode *inode = mapping->host;
1745 struct splice_desc sd = {
1746 .total_len = len,
1747 .flags = flags,
1748 .pos = *ppos,
1749 .u.file = out,
1750 };
1751
1752 ret = __splice_from_pipe(pipe, &sd, ocfs2_splice_write_actor);
1753 if (ret > 0) {
1754 *ppos += ret;
1755
1756 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
1757 err = generic_osync_inode(inode, mapping,
1758 OSYNC_METADATA|OSYNC_DATA);
1759 if (err)
1760 ret = err;
1761 }
1762 }
1763
1764 return ret;
1765 }
1766
1767 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
1768 struct file *out,
1769 loff_t *ppos,
1770 size_t len,
1771 unsigned int flags)
1772 {
1773 int ret;
1774 struct inode *inode = out->f_path.dentry->d_inode;
1775
1776 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
1777 (unsigned int)len,
1778 out->f_path.dentry->d_name.len,
1779 out->f_path.dentry->d_name.name);
1780
1781 inode_double_lock(inode, pipe->inode);
1782
1783 ret = ocfs2_rw_lock(inode, 1);
1784 if (ret < 0) {
1785 mlog_errno(ret);
1786 goto out;
1787 }
1788
1789 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
1790 NULL);
1791 if (ret < 0) {
1792 mlog_errno(ret);
1793 goto out_unlock;
1794 }
1795
1796 /* ok, we're done with i_size and alloc work */
1797 ret = __ocfs2_file_splice_write(pipe, out, ppos, len, flags);
1798
1799 out_unlock:
1800 ocfs2_rw_unlock(inode, 1);
1801 out:
1802 inode_double_unlock(inode, pipe->inode);
1803
1804 mlog_exit(ret);
1805 return ret;
1806 }
1807
1808 static ssize_t ocfs2_file_splice_read(struct file *in,
1809 loff_t *ppos,
1810 struct pipe_inode_info *pipe,
1811 size_t len,
1812 unsigned int flags)
1813 {
1814 int ret = 0;
1815 struct inode *inode = in->f_path.dentry->d_inode;
1816
1817 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
1818 (unsigned int)len,
1819 in->f_path.dentry->d_name.len,
1820 in->f_path.dentry->d_name.name);
1821
1822 /*
1823 * See the comment in ocfs2_file_aio_read()
1824 */
1825 ret = ocfs2_meta_lock(inode, NULL, 0);
1826 if (ret < 0) {
1827 mlog_errno(ret);
1828 goto bail;
1829 }
1830 ocfs2_meta_unlock(inode, 0);
1831
1832 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
1833
1834 bail:
1835 mlog_exit(ret);
1836 return ret;
1837 }
1838
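/*
 * Read entry point. Buffered reads protect themselves in ->readpage();
 * only O_DIRECT reads take i_alloc_sem and the rw cluster lock here, and
 * the meta lock is cycled once so i_size is current for the generic
 * checks in generic_file_aio_read().
 */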
1839 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
1840 const struct iovec *iov,
1841 unsigned long nr_segs,
1842 loff_t pos)
1843 {
1844 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
1845 struct file *filp = iocb->ki_filp;
1846 struct inode *inode = filp->f_path.dentry->d_inode;
1847
1848 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
1849 (unsigned int)nr_segs,
1850 filp->f_path.dentry->d_name.len,
1851 filp->f_path.dentry->d_name.name);
1852
1853 if (!inode) {
1854 ret = -EINVAL;
1855 mlog_errno(ret);
1856 goto bail;
1857 }
1858
1859 /*
1860 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
1861 * need locks to protect pending reads from racing with truncate.
1862 */
1863 if (filp->f_flags & O_DIRECT) {
1864 down_read(&inode->i_alloc_sem);
1865 have_alloc_sem = 1;
1866
1867 ret = ocfs2_rw_lock(inode, 0);
1868 if (ret < 0) {
1869 mlog_errno(ret);
1870 goto bail;
1871 }
1872 rw_level = 0;
1873 /* communicate with ocfs2_dio_end_io */
1874 ocfs2_iocb_set_rw_locked(iocb, rw_level);
1875 }
1876
1877 /*
1878 * We're fine letting folks race truncates and extending
1879 * writes with read across the cluster, just like they can
1880 * locally. Hence no rw_lock during read.
1881 *
1882 * Take and drop the meta data lock to update inode fields
1883 * like i_size. This allows the checks down below
1884 * generic_file_aio_read() a chance of actually working.
1885 */
1886 ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
1887 if (ret < 0) {
1888 mlog_errno(ret);
1889 goto bail;
1890 }
1891 ocfs2_meta_unlock(inode, lock_level);
1892
1893 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
1894 if (ret == -EINVAL)
1895 mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
1896
1897 /* buffered aio wouldn't have proper lock coverage today */
1898 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
1899
1900 /* see ocfs2_file_aio_write */
1901 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
1902 rw_level = -1;
1903 have_alloc_sem = 0;
1904 }
1905
1906 bail:
1907 if (have_alloc_sem)
1908 up_read(&inode->i_alloc_sem);
1909 if (rw_level != -1)
1910 ocfs2_rw_unlock(inode, rw_level);
1911 mlog_exit(ret);
1912
1913 return ret;
1914 }
1915
1916 const struct inode_operations ocfs2_file_iops = {
1917 .setattr = ocfs2_setattr,
1918 .getattr = ocfs2_getattr,
1919 .permission = ocfs2_permission,
1920 };
1921
1922 const struct inode_operations ocfs2_special_file_iops = {
1923 .setattr = ocfs2_setattr,
1924 .getattr = ocfs2_getattr,
1925 .permission = ocfs2_permission,
1926 };
1927
1928 const struct file_operations ocfs2_fops = {
1929 .read = do_sync_read,
1930 .write = do_sync_write,
1931 .mmap = ocfs2_mmap,
1932 .fsync = ocfs2_sync_file,
1933 .release = ocfs2_file_release,
1934 .open = ocfs2_file_open,
1935 .aio_read = ocfs2_file_aio_read,
1936 .aio_write = ocfs2_file_aio_write,
1937 .ioctl = ocfs2_ioctl,
1938 #ifdef CONFIG_COMPAT
1939 .compat_ioctl = ocfs2_compat_ioctl,
1940 #endif
1941 .splice_read = ocfs2_file_splice_read,
1942 .splice_write = ocfs2_file_splice_write,
1943 };
1944
1945 const struct file_operations ocfs2_dops = {
1946 .read = generic_read_dir,
1947 .readdir = ocfs2_readdir,
1948 .fsync = ocfs2_sync_file,
1949 .ioctl = ocfs2_ioctl,
1950 #ifdef CONFIG_COMPAT
1951 .compat_ioctl = ocfs2_compat_ioctl,
1952 #endif
1953 };