fs/ocfs2/file.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * file.c
5 *
6 * File open, close, extend, truncate
7 *
8 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 23  * Boston, MA 02111-1307, USA.
24 */
25
26 #include <linux/capability.h>
27 #include <linux/fs.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/uio.h>
33 #include <linux/sched.h>
34 #include <linux/splice.h>
35 #include <linux/mount.h>
36 #include <linux/writeback.h>
37
38 #define MLOG_MASK_PREFIX ML_INODE
39 #include <cluster/masklog.h>
40
41 #include "ocfs2.h"
42
43 #include "alloc.h"
44 #include "aops.h"
45 #include "dir.h"
46 #include "dlmglue.h"
47 #include "extent_map.h"
48 #include "file.h"
49 #include "sysfile.h"
50 #include "inode.h"
51 #include "ioctl.h"
52 #include "journal.h"
53 #include "mmap.h"
54 #include "suballoc.h"
55 #include "super.h"
56
57 #include "buffer_head_io.h"
58
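/* Kick off writeback of the inode's dirty pages and sync any private
 * buffers attached to the mapping; the journal commit in
 * ocfs2_sync_file() below takes care of the metadata. */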
59 static int ocfs2_sync_inode(struct inode *inode)
60 {
61 filemap_fdatawrite(inode->i_mapping);
62 return sync_mapping_buffers(inode->i_mapping);
63 }
64
65 static int ocfs2_file_open(struct inode *inode, struct file *file)
66 {
67 int status;
68 int mode = file->f_flags;
69 struct ocfs2_inode_info *oi = OCFS2_I(inode);
70
71 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
72 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name);
73
74 spin_lock(&oi->ip_lock);
75
76 /* Check that the inode hasn't been wiped from disk by another
77 * node. If it hasn't then we're safe as long as we hold the
78 * spin lock until our increment of open count. */
79 if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
80 spin_unlock(&oi->ip_lock);
81
82 status = -ENOENT;
83 goto leave;
84 }
85
86 if (mode & O_DIRECT)
87 oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;
88
89 oi->ip_open_count++;
90 spin_unlock(&oi->ip_lock);
91 status = 0;
92 leave:
93 mlog_exit(status);
94 return status;
95 }
96
97 static int ocfs2_file_release(struct inode *inode, struct file *file)
98 {
99 struct ocfs2_inode_info *oi = OCFS2_I(inode);
100
101 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file,
102 file->f_path.dentry->d_name.len,
103 file->f_path.dentry->d_name.name);
104
105 spin_lock(&oi->ip_lock);
106 if (!--oi->ip_open_count)
107 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
108 spin_unlock(&oi->ip_lock);
109
110 mlog_exit(0);
111
112 return 0;
113 }
114
115 static int ocfs2_sync_file(struct file *file,
116 struct dentry *dentry,
117 int datasync)
118 {
119 int err = 0;
120 journal_t *journal;
121 struct inode *inode = dentry->d_inode;
122 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
123
124 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", file, dentry, datasync,
125 dentry->d_name.len, dentry->d_name.name);
126
127 err = ocfs2_sync_inode(dentry->d_inode);
128 if (err)
129 goto bail;
130
131 journal = osb->journal->j_journal;
132 err = journal_force_commit(journal);
133
134 bail:
135 mlog_exit(err);
136
137 return (err < 0) ? -EIO : 0;
138 }
139
140 int ocfs2_should_update_atime(struct inode *inode,
141 struct vfsmount *vfsmnt)
142 {
143 struct timespec now;
144 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
145
146 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
147 return 0;
148
149 if ((inode->i_flags & S_NOATIME) ||
150 ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
151 return 0;
152
153 /*
154 * We can be called with no vfsmnt structure - NFSD will
155 * sometimes do this.
156 *
157 * Note that our action here is different than touch_atime() -
158 * if we can't tell whether this is a noatime mount, then we
159 * don't know whether to trust the value of s_atime_quantum.
160 */
161 if (vfsmnt == NULL)
162 return 0;
163
164 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
165 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
166 return 0;
167
168 if (vfsmnt->mnt_flags & MNT_RELATIME) {
169 if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
170 (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
171 return 1;
172
173 return 0;
174 }
175
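	/*
	 * Fall back to the atime_quantum mount option: skip the update
	 * unless more than that many seconds have passed since the last
	 * recorded atime.
	 */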
176 now = CURRENT_TIME;
177 if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
178 return 0;
179 else
180 return 1;
181 }
182
183 int ocfs2_update_inode_atime(struct inode *inode,
184 struct buffer_head *bh)
185 {
186 int ret;
187 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
188 handle_t *handle;
189
190 mlog_entry_void();
191
192 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
193 if (handle == NULL) {
194 ret = -ENOMEM;
195 mlog_errno(ret);
196 goto out;
197 }
198
199 inode->i_atime = CURRENT_TIME;
200 ret = ocfs2_mark_inode_dirty(handle, inode, bh);
201 if (ret < 0)
202 mlog_errno(ret);
203
204 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
205 out:
206 mlog_exit(ret);
207 return ret;
208 }
209
210 static int ocfs2_set_inode_size(handle_t *handle,
211 struct inode *inode,
212 struct buffer_head *fe_bh,
213 u64 new_i_size)
214 {
215 int status;
216
217 mlog_entry_void();
218 i_size_write(inode, new_i_size);
219 inode->i_blocks = ocfs2_inode_sector_count(inode);
220 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
221
222 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
223 if (status < 0) {
224 mlog_errno(status);
225 goto bail;
226 }
227
228 bail:
229 mlog_exit(status);
230 return status;
231 }
232
233 static int ocfs2_simple_size_update(struct inode *inode,
234 struct buffer_head *di_bh,
235 u64 new_i_size)
236 {
237 int ret;
238 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
239 handle_t *handle = NULL;
240
241 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
242 if (handle == NULL) {
243 ret = -ENOMEM;
244 mlog_errno(ret);
245 goto out;
246 }
247
248 ret = ocfs2_set_inode_size(handle, inode, di_bh,
249 new_i_size);
250 if (ret < 0)
251 mlog_errno(ret);
252
253 ocfs2_commit_trans(osb, handle);
254 out:
255 return ret;
256 }
257
258 static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
259 struct inode *inode,
260 struct buffer_head *fe_bh,
261 u64 new_i_size)
262 {
263 int status;
264 handle_t *handle;
265 struct ocfs2_dinode *di;
266 u64 cluster_bytes;
267
268 mlog_entry_void();
269
270 /* TODO: This needs to actually orphan the inode in this
271 * transaction. */
272
273 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
274 if (IS_ERR(handle)) {
275 status = PTR_ERR(handle);
276 mlog_errno(status);
277 goto out;
278 }
279
280 status = ocfs2_journal_access(handle, inode, fe_bh,
281 OCFS2_JOURNAL_ACCESS_WRITE);
282 if (status < 0) {
283 mlog_errno(status);
284 goto out_commit;
285 }
286
287 /*
288 * Do this before setting i_size.
289 */
290 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
291 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
292 cluster_bytes);
293 if (status) {
294 mlog_errno(status);
295 goto out_commit;
296 }
297
298 i_size_write(inode, new_i_size);
299 inode->i_blocks = ocfs2_align_bytes_to_sectors(new_i_size);
300 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
301
302 di = (struct ocfs2_dinode *) fe_bh->b_data;
303 di->i_size = cpu_to_le64(new_i_size);
304 di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
305 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
306
307 status = ocfs2_journal_dirty(handle, fe_bh);
308 if (status < 0)
309 mlog_errno(status);
310
311 out_commit:
312 ocfs2_commit_trans(osb, handle);
313 out:
314
315 mlog_exit(status);
316 return status;
317 }
318
319 static int ocfs2_truncate_file(struct inode *inode,
320 struct buffer_head *di_bh,
321 u64 new_i_size)
322 {
323 int status = 0;
324 struct ocfs2_dinode *fe = NULL;
325 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
326 struct ocfs2_truncate_context *tc = NULL;
327
 328         mlog_entry("(inode = %llu, new_i_size = %llu)\n",
329 (unsigned long long)OCFS2_I(inode)->ip_blkno,
330 (unsigned long long)new_i_size);
331
332 fe = (struct ocfs2_dinode *) di_bh->b_data;
333 if (!OCFS2_IS_VALID_DINODE(fe)) {
334 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
335 status = -EIO;
336 goto bail;
337 }
338
339 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
340 "Inode %llu, inode i_size = %lld != di "
341 "i_size = %llu, i_flags = 0x%x\n",
342 (unsigned long long)OCFS2_I(inode)->ip_blkno,
343 i_size_read(inode),
344 (unsigned long long)le64_to_cpu(fe->i_size),
345 le32_to_cpu(fe->i_flags));
346
347 if (new_i_size > le64_to_cpu(fe->i_size)) {
348 mlog(0, "asked to truncate file with size (%llu) to size (%llu)!\n",
349 (unsigned long long)le64_to_cpu(fe->i_size),
350 (unsigned long long)new_i_size);
351 status = -EINVAL;
352 mlog_errno(status);
353 goto bail;
354 }
355
356 mlog(0, "inode %llu, i_size = %llu, new_i_size = %llu\n",
357 (unsigned long long)le64_to_cpu(fe->i_blkno),
358 (unsigned long long)le64_to_cpu(fe->i_size),
359 (unsigned long long)new_i_size);
360
 361         /* let's handle the simple truncate cases before doing any more
362 * cluster locking. */
363 if (new_i_size == le64_to_cpu(fe->i_size))
364 goto bail;
365
366 down_write(&OCFS2_I(inode)->ip_alloc_sem);
367
368 /* This forces other nodes to sync and drop their pages. Do
369 * this even if we have a truncate without allocation change -
370 * ocfs2 cluster sizes can be much greater than page size, so
371 * we have to truncate them anyway. */
372 status = ocfs2_data_lock(inode, 1);
373 if (status < 0) {
374 up_write(&OCFS2_I(inode)->ip_alloc_sem);
375
376 mlog_errno(status);
377 goto bail;
378 }
379
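	/*
	 * Drop all mappings from the first whole page at or past
	 * new_i_size onwards (a holelen of 0 means "to the end of the
	 * file"), then toss the page cache beyond the new size.
	 */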
380 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
381 truncate_inode_pages(inode->i_mapping, new_i_size);
382
383 /* alright, we're going to need to do a full blown alloc size
384 * change. Orphan the inode so that recovery can complete the
385 * truncate if necessary. This does the task of marking
386 * i_size. */
387 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
388 if (status < 0) {
389 mlog_errno(status);
390 goto bail_unlock_data;
391 }
392
393 status = ocfs2_prepare_truncate(osb, inode, di_bh, &tc);
394 if (status < 0) {
395 mlog_errno(status);
396 goto bail_unlock_data;
397 }
398
399 status = ocfs2_commit_truncate(osb, inode, di_bh, tc);
400 if (status < 0) {
401 mlog_errno(status);
402 goto bail_unlock_data;
403 }
404
405 /* TODO: orphan dir cleanup here. */
406 bail_unlock_data:
407 ocfs2_data_unlock(inode, 1);
408
409 up_write(&OCFS2_I(inode)->ip_alloc_sem);
410
411 bail:
412
413 mlog_exit(status);
414 return status;
415 }
416
417 /*
418 * extend allocation only here.
419 * we'll update all the disk stuff, and oip->alloc_size
420 *
421 * expect stuff to be locked, a transaction started and enough data /
422 * metadata reservations in the contexts.
423 *
424 * Will return -EAGAIN, and a reason if a restart is needed.
425 * If passed in, *reason will always be set, even in error.
426 */
427 int ocfs2_do_extend_allocation(struct ocfs2_super *osb,
428 struct inode *inode,
429 u32 *logical_offset,
430 u32 clusters_to_add,
431 int mark_unwritten,
432 struct buffer_head *fe_bh,
433 handle_t *handle,
434 struct ocfs2_alloc_context *data_ac,
435 struct ocfs2_alloc_context *meta_ac,
436 enum ocfs2_alloc_restarted *reason_ret)
437 {
438 int status = 0;
439 int free_extents;
440 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
441 enum ocfs2_alloc_restarted reason = RESTART_NONE;
442 u32 bit_off, num_bits;
443 u64 block;
444 u8 flags = 0;
445
446 BUG_ON(!clusters_to_add);
447
448 if (mark_unwritten)
449 flags = OCFS2_EXT_UNWRITTEN;
450
451 free_extents = ocfs2_num_free_extents(osb, inode, fe);
452 if (free_extents < 0) {
453 status = free_extents;
454 mlog_errno(status);
455 goto leave;
456 }
457
 458         /* there are two cases which could cause us to return -EAGAIN in the
459 * we-need-more-metadata case:
460 * 1) we haven't reserved *any*
461 * 2) we are so fragmented, we've needed to add metadata too
462 * many times. */
463 if (!free_extents && !meta_ac) {
464 mlog(0, "we haven't reserved any metadata!\n");
465 status = -EAGAIN;
466 reason = RESTART_META;
467 goto leave;
468 } else if ((!free_extents)
469 && (ocfs2_alloc_context_bits_left(meta_ac)
470 < ocfs2_extend_meta_needed(fe))) {
471 mlog(0, "filesystem is really fragmented...\n");
472 status = -EAGAIN;
473 reason = RESTART_META;
474 goto leave;
475 }
476
477 status = ocfs2_claim_clusters(osb, handle, data_ac, 1,
478 &bit_off, &num_bits);
479 if (status < 0) {
480 if (status != -ENOSPC)
481 mlog_errno(status);
482 goto leave;
483 }
484
485 BUG_ON(num_bits > clusters_to_add);
486
487 /* reserve our write early -- insert_extent may update the inode */
488 status = ocfs2_journal_access(handle, inode, fe_bh,
489 OCFS2_JOURNAL_ACCESS_WRITE);
490 if (status < 0) {
491 mlog_errno(status);
492 goto leave;
493 }
494
495 block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
496 mlog(0, "Allocating %u clusters at block %u for inode %llu\n",
497 num_bits, bit_off, (unsigned long long)OCFS2_I(inode)->ip_blkno);
498 status = ocfs2_insert_extent(osb, handle, inode, fe_bh,
499 *logical_offset, block, num_bits,
500 flags, meta_ac);
501 if (status < 0) {
502 mlog_errno(status);
503 goto leave;
504 }
505
506 status = ocfs2_journal_dirty(handle, fe_bh);
507 if (status < 0) {
508 mlog_errno(status);
509 goto leave;
510 }
511
512 clusters_to_add -= num_bits;
513 *logical_offset += num_bits;
514
515 if (clusters_to_add) {
516 mlog(0, "need to alloc once more, clusters = %u, wanted = "
517 "%u\n", fe->i_clusters, clusters_to_add);
518 status = -EAGAIN;
519 reason = RESTART_TRANS;
520 }
521
522 leave:
523 mlog_exit(status);
524 if (reason_ret)
525 *reason_ret = reason;
526 return status;
527 }
528
529 /*
530 * For a given allocation, determine which allocators will need to be
531 * accessed, and lock them, reserving the appropriate number of bits.
532 *
533 * Sparse file systems call this from ocfs2_write_begin_nolock()
534 * and ocfs2_allocate_unwritten_extents().
535 *
536 * File systems which don't support holes call this from
537 * ocfs2_extend_allocation().
538 */
539 int ocfs2_lock_allocators(struct inode *inode, struct ocfs2_dinode *di,
540 u32 clusters_to_add, u32 extents_to_split,
541 struct ocfs2_alloc_context **data_ac,
542 struct ocfs2_alloc_context **meta_ac)
543 {
544 int ret = 0, num_free_extents;
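	/*
	 * Reserve room assuming every split can add up to two extra
	 * extent records (splitting one record can leave three behind),
	 * hence the factor of two below.
	 */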
545 unsigned int max_recs_needed = clusters_to_add + 2 * extents_to_split;
546 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
547
548 *meta_ac = NULL;
549 if (data_ac)
550 *data_ac = NULL;
551
552 BUG_ON(clusters_to_add != 0 && data_ac == NULL);
553
554 mlog(0, "extend inode %llu, i_size = %lld, di->i_clusters = %u, "
555 "clusters_to_add = %u, extents_to_split = %u\n",
556 (unsigned long long)OCFS2_I(inode)->ip_blkno, i_size_read(inode),
557 le32_to_cpu(di->i_clusters), clusters_to_add, extents_to_split);
558
559 num_free_extents = ocfs2_num_free_extents(osb, inode, di);
560 if (num_free_extents < 0) {
561 ret = num_free_extents;
562 mlog_errno(ret);
563 goto out;
564 }
565
566 /*
567 * Sparse allocation file systems need to be more conservative
568 * with reserving room for expansion - the actual allocation
569 * happens while we've got a journal handle open so re-taking
570 * a cluster lock (because we ran out of room for another
571 * extent) will violate ordering rules.
572 *
573 * Most of the time we'll only be seeing this 1 cluster at a time
574 * anyway.
575 *
576 * Always lock for any unwritten extents - we might want to
577 * add blocks during a split.
578 */
579 if (!num_free_extents ||
580 (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) {
581 ret = ocfs2_reserve_new_metadata(osb, di, meta_ac);
582 if (ret < 0) {
583 if (ret != -ENOSPC)
584 mlog_errno(ret);
585 goto out;
586 }
587 }
588
589 if (clusters_to_add == 0)
590 goto out;
591
592 ret = ocfs2_reserve_clusters(osb, clusters_to_add, data_ac);
593 if (ret < 0) {
594 if (ret != -ENOSPC)
595 mlog_errno(ret);
596 goto out;
597 }
598
599 out:
600 if (ret) {
601 if (*meta_ac) {
602 ocfs2_free_alloc_context(*meta_ac);
603 *meta_ac = NULL;
604 }
605
606 /*
607 * We cannot have an error and a non null *data_ac.
608 */
609 }
610
611 return ret;
612 }
613
614 static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
615 u32 clusters_to_add, int mark_unwritten)
616 {
617 int status = 0;
618 int restart_func = 0;
619 int credits;
620 u32 prev_clusters;
621 struct buffer_head *bh = NULL;
622 struct ocfs2_dinode *fe = NULL;
623 handle_t *handle = NULL;
624 struct ocfs2_alloc_context *data_ac = NULL;
625 struct ocfs2_alloc_context *meta_ac = NULL;
626 enum ocfs2_alloc_restarted why;
627 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
628
629 mlog_entry("(clusters_to_add = %u)\n", clusters_to_add);
630
631 /*
632 * This function only exists for file systems which don't
633 * support holes.
634 */
635 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
636
637 status = ocfs2_read_block(osb, OCFS2_I(inode)->ip_blkno, &bh,
638 OCFS2_BH_CACHED, inode);
639 if (status < 0) {
640 mlog_errno(status);
641 goto leave;
642 }
643
644 fe = (struct ocfs2_dinode *) bh->b_data;
645 if (!OCFS2_IS_VALID_DINODE(fe)) {
646 OCFS2_RO_ON_INVALID_DINODE(inode->i_sb, fe);
647 status = -EIO;
648 goto leave;
649 }
650
651 restart_all:
652 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
653
654 status = ocfs2_lock_allocators(inode, fe, clusters_to_add, 0, &data_ac,
655 &meta_ac);
656 if (status) {
657 mlog_errno(status);
658 goto leave;
659 }
660
661 credits = ocfs2_calc_extend_credits(osb->sb, fe, clusters_to_add);
662 handle = ocfs2_start_trans(osb, credits);
663 if (IS_ERR(handle)) {
664 status = PTR_ERR(handle);
665 handle = NULL;
666 mlog_errno(status);
667 goto leave;
668 }
669
670 restarted_transaction:
 671         /* reserve a write to the file entry early on - that way if we
672 * run out of credits in the allocation path, we can still
673 * update i_size. */
674 status = ocfs2_journal_access(handle, inode, bh,
675 OCFS2_JOURNAL_ACCESS_WRITE);
676 if (status < 0) {
677 mlog_errno(status);
678 goto leave;
679 }
680
681 prev_clusters = OCFS2_I(inode)->ip_clusters;
682
683 status = ocfs2_do_extend_allocation(osb,
684 inode,
685 &logical_start,
686 clusters_to_add,
687 mark_unwritten,
688 bh,
689 handle,
690 data_ac,
691 meta_ac,
692 &why);
693 if ((status < 0) && (status != -EAGAIN)) {
694 if (status != -ENOSPC)
695 mlog_errno(status);
696 goto leave;
697 }
698
699 status = ocfs2_journal_dirty(handle, bh);
700 if (status < 0) {
701 mlog_errno(status);
702 goto leave;
703 }
704
705 spin_lock(&OCFS2_I(inode)->ip_lock);
706 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
707 spin_unlock(&OCFS2_I(inode)->ip_lock);
708
709 if (why != RESTART_NONE && clusters_to_add) {
710 if (why == RESTART_META) {
711 mlog(0, "restarting function.\n");
712 restart_func = 1;
713 } else {
714 BUG_ON(why != RESTART_TRANS);
715
716 mlog(0, "restarting transaction.\n");
717 /* TODO: This can be more intelligent. */
718 credits = ocfs2_calc_extend_credits(osb->sb,
719 fe,
720 clusters_to_add);
721 status = ocfs2_extend_trans(handle, credits);
722 if (status < 0) {
723 /* handle still has to be committed at
724 * this point. */
725 status = -ENOMEM;
726 mlog_errno(status);
727 goto leave;
728 }
729 goto restarted_transaction;
730 }
731 }
732
733 mlog(0, "fe: i_clusters = %u, i_size=%llu\n",
734 le32_to_cpu(fe->i_clusters),
735 (unsigned long long)le64_to_cpu(fe->i_size));
736 mlog(0, "inode: ip_clusters=%u, i_size=%lld\n",
737 OCFS2_I(inode)->ip_clusters, i_size_read(inode));
738
739 leave:
740 if (handle) {
741 ocfs2_commit_trans(osb, handle);
742 handle = NULL;
743 }
744 if (data_ac) {
745 ocfs2_free_alloc_context(data_ac);
746 data_ac = NULL;
747 }
748 if (meta_ac) {
749 ocfs2_free_alloc_context(meta_ac);
750 meta_ac = NULL;
751 }
752 if ((!status) && restart_func) {
753 restart_func = 0;
754 goto restart_all;
755 }
756 if (bh) {
757 brelse(bh);
758 bh = NULL;
759 }
760
761 mlog_exit(status);
762 return status;
763 }
764
765 static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
766 u32 clusters_to_add, int mark_unwritten)
767 {
768 int ret;
769
770 /*
 771          * The alloc sem blocks people in read/write from reading our
772 * allocation until we're done changing it. We depend on
773 * i_mutex to block other extend/truncate calls while we're
774 * here.
775 */
776 down_write(&OCFS2_I(inode)->ip_alloc_sem);
777 ret = __ocfs2_extend_allocation(inode, logical_start, clusters_to_add,
778 mark_unwritten);
779 up_write(&OCFS2_I(inode)->ip_alloc_sem);
780
781 return ret;
782 }
783
784 /* Some parts of this taken from generic_cont_expand, which turned out
785 * to be too fragile to do exactly what we need without us having to
786 * worry about recursive locking in ->prepare_write() and
787 * ->commit_write(). */
788 static int ocfs2_write_zero_page(struct inode *inode,
789 u64 size)
790 {
791 struct address_space *mapping = inode->i_mapping;
792 struct page *page;
793 unsigned long index;
794 unsigned int offset;
795 handle_t *handle = NULL;
796 int ret;
797
798 offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
799 /* ugh. in prepare/commit_write, if from==to==start of block, we
800 ** skip the prepare. make sure we never send an offset for the start
801 ** of a block
802 */
803 if ((offset & (inode->i_sb->s_blocksize - 1)) == 0) {
804 offset++;
805 }
806 index = size >> PAGE_CACHE_SHIFT;
807
808 page = grab_cache_page(mapping, index);
809 if (!page) {
810 ret = -ENOMEM;
811 mlog_errno(ret);
812 goto out;
813 }
814
815 ret = ocfs2_prepare_write_nolock(inode, page, offset, offset);
816 if (ret < 0) {
817 mlog_errno(ret);
818 goto out_unlock;
819 }
820
821 if (ocfs2_should_order_data(inode)) {
822 handle = ocfs2_start_walk_page_trans(inode, page, offset,
823 offset);
824 if (IS_ERR(handle)) {
825 ret = PTR_ERR(handle);
826 handle = NULL;
827 goto out_unlock;
828 }
829 }
830
831 /* must not update i_size! */
832 ret = block_commit_write(page, offset, offset);
833 if (ret < 0)
834 mlog_errno(ret);
835 else
836 ret = 0;
837
838 if (handle)
839 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
840 out_unlock:
841 unlock_page(page);
842 page_cache_release(page);
843 out:
844 return ret;
845 }
846
847 static int ocfs2_zero_extend(struct inode *inode,
848 u64 zero_to_size)
849 {
850 int ret = 0;
851 u64 start_off;
852 struct super_block *sb = inode->i_sb;
853
854 start_off = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
855 while (start_off < zero_to_size) {
856 ret = ocfs2_write_zero_page(inode, start_off);
857 if (ret < 0) {
858 mlog_errno(ret);
859 goto out;
860 }
861
862 start_off += sb->s_blocksize;
863
864 /*
865 * Very large extends have the potential to lock up
866 * the cpu for extended periods of time.
867 */
868 cond_resched();
869 }
870
871 out:
872 return ret;
873 }
874
875 /*
876 * A tail_to_skip value > 0 indicates that we're being called from
877 * ocfs2_file_aio_write(). This has the following implications:
878 *
879 * - we don't want to update i_size
880 * - di_bh will be NULL, which is fine because it's only used in the
881 * case where we want to update i_size.
882 * - ocfs2_zero_extend() will then only be filling the hole created
883 * between i_size and the start of the write.
884 */
885 static int ocfs2_extend_file(struct inode *inode,
886 struct buffer_head *di_bh,
887 u64 new_i_size,
888 size_t tail_to_skip)
889 {
890 int ret = 0;
891 u32 clusters_to_add = 0;
892
893 BUG_ON(!tail_to_skip && !di_bh);
894
895 /* setattr sometimes calls us like this. */
896 if (new_i_size == 0)
897 goto out;
898
899 if (i_size_read(inode) == new_i_size)
900 goto out;
901 BUG_ON(new_i_size < i_size_read(inode));
902
903 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
904 BUG_ON(tail_to_skip != 0);
905 goto out_update_size;
906 }
907
908 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size) -
909 OCFS2_I(inode)->ip_clusters;
910
911 /*
912 * protect the pages that ocfs2_zero_extend is going to be
913 * pulling into the page cache.. we do this before the
914 * metadata extend so that we don't get into the situation
915 * where we've extended the metadata but can't get the data
916 * lock to zero.
917 */
918 ret = ocfs2_data_lock(inode, 1);
919 if (ret < 0) {
920 mlog_errno(ret);
921 goto out;
922 }
923
924 if (clusters_to_add) {
925 ret = ocfs2_extend_allocation(inode,
926 OCFS2_I(inode)->ip_clusters,
927 clusters_to_add, 0);
928 if (ret < 0) {
929 mlog_errno(ret);
930 goto out_unlock;
931 }
932 }
933
934 /*
935 * Call this even if we don't add any clusters to the tree. We
936 * still need to zero the area between the old i_size and the
937 * new i_size.
938 */
939 ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip);
940 if (ret < 0) {
941 mlog_errno(ret);
942 goto out_unlock;
943 }
944
945 out_update_size:
946 if (!tail_to_skip) {
947 /* We're being called from ocfs2_setattr() which wants
948 * us to update i_size */
949 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
950 if (ret < 0)
951 mlog_errno(ret);
952 }
953
954 out_unlock:
955 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
956 ocfs2_data_unlock(inode, 1);
957
958 out:
959 return ret;
960 }
961
962 int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
963 {
964 int status = 0, size_change;
965 struct inode *inode = dentry->d_inode;
966 struct super_block *sb = inode->i_sb;
967 struct ocfs2_super *osb = OCFS2_SB(sb);
968 struct buffer_head *bh = NULL;
969 handle_t *handle = NULL;
970
971 mlog_entry("(0x%p, '%.*s')\n", dentry,
972 dentry->d_name.len, dentry->d_name.name);
973
974 if (attr->ia_valid & ATTR_MODE)
975 mlog(0, "mode change: %d\n", attr->ia_mode);
976 if (attr->ia_valid & ATTR_UID)
977 mlog(0, "uid change: %d\n", attr->ia_uid);
978 if (attr->ia_valid & ATTR_GID)
979 mlog(0, "gid change: %d\n", attr->ia_gid);
980 if (attr->ia_valid & ATTR_SIZE)
981 mlog(0, "size change...\n");
982 if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
983 mlog(0, "time change...\n");
984
985 #define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
986 | ATTR_GID | ATTR_UID | ATTR_MODE)
987 if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
988 mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
989 return 0;
990 }
991
992 status = inode_change_ok(inode, attr);
993 if (status)
994 return status;
995
996 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
997 if (size_change) {
998 status = ocfs2_rw_lock(inode, 1);
999 if (status < 0) {
1000 mlog_errno(status);
1001 goto bail;
1002 }
1003 }
1004
1005 status = ocfs2_meta_lock(inode, &bh, 1);
1006 if (status < 0) {
1007 if (status != -ENOENT)
1008 mlog_errno(status);
1009 goto bail_unlock_rw;
1010 }
1011
1012 if (size_change && attr->ia_size != i_size_read(inode)) {
1013 if (i_size_read(inode) > attr->ia_size)
1014 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1015 else
1016 status = ocfs2_extend_file(inode, bh, attr->ia_size, 0);
1017 if (status < 0) {
1018 if (status != -ENOSPC)
1019 mlog_errno(status);
1020 status = -ENOSPC;
1021 goto bail_unlock;
1022 }
1023 }
1024
1025 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1026 if (IS_ERR(handle)) {
1027 status = PTR_ERR(handle);
1028 mlog_errno(status);
1029 goto bail_unlock;
1030 }
1031
1032 /*
1033 * This will intentionally not wind up calling vmtruncate(),
1034 * since all the work for a size change has been done above.
1035 * Otherwise, we could get into problems with truncate as
1036 * ip_alloc_sem is used there to protect against i_size
1037 * changes.
1038 */
1039 status = inode_setattr(inode, attr);
1040 if (status < 0) {
1041 mlog_errno(status);
1042 goto bail_commit;
1043 }
1044
1045 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1046 if (status < 0)
1047 mlog_errno(status);
1048
1049 bail_commit:
1050 ocfs2_commit_trans(osb, handle);
1051 bail_unlock:
1052 ocfs2_meta_unlock(inode, 1);
1053 bail_unlock_rw:
1054 if (size_change)
1055 ocfs2_rw_unlock(inode, 1);
1056 bail:
1057 if (bh)
1058 brelse(bh);
1059
1060 mlog_exit(status);
1061 return status;
1062 }
1063
1064 int ocfs2_getattr(struct vfsmount *mnt,
1065 struct dentry *dentry,
1066 struct kstat *stat)
1067 {
1068 struct inode *inode = dentry->d_inode;
1069 struct super_block *sb = dentry->d_inode->i_sb;
1070 struct ocfs2_super *osb = sb->s_fs_info;
1071 int err;
1072
1073 mlog_entry_void();
1074
1075 err = ocfs2_inode_revalidate(dentry);
1076 if (err) {
1077 if (err != -ENOENT)
1078 mlog_errno(err);
1079 goto bail;
1080 }
1081
1082 generic_fillattr(inode, stat);
1083
1084 /* We set the blksize from the cluster size for performance */
1085 stat->blksize = osb->s_clustersize;
1086
1087 bail:
1088 mlog_exit(err);
1089
1090 return err;
1091 }
1092
1093 int ocfs2_permission(struct inode *inode, int mask, struct nameidata *nd)
1094 {
1095 int ret;
1096
1097 mlog_entry_void();
1098
1099 ret = ocfs2_meta_lock(inode, NULL, 0);
1100 if (ret) {
1101 if (ret != -ENOENT)
1102 mlog_errno(ret);
1103 goto out;
1104 }
1105
1106 ret = generic_permission(inode, mask, NULL);
1107
1108 ocfs2_meta_unlock(inode, 0);
1109 out:
1110 mlog_exit(ret);
1111 return ret;
1112 }
1113
1114 static int __ocfs2_write_remove_suid(struct inode *inode,
1115 struct buffer_head *bh)
1116 {
1117 int ret;
1118 handle_t *handle;
1119 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1120 struct ocfs2_dinode *di;
1121
1122 mlog_entry("(Inode %llu, mode 0%o)\n",
1123 (unsigned long long)OCFS2_I(inode)->ip_blkno, inode->i_mode);
1124
1125 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1126 if (handle == NULL) {
1127 ret = -ENOMEM;
1128 mlog_errno(ret);
1129 goto out;
1130 }
1131
1132 ret = ocfs2_journal_access(handle, inode, bh,
1133 OCFS2_JOURNAL_ACCESS_WRITE);
1134 if (ret < 0) {
1135 mlog_errno(ret);
1136 goto out_trans;
1137 }
1138
1139 inode->i_mode &= ~S_ISUID;
1140 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1141 inode->i_mode &= ~S_ISGID;
1142
1143 di = (struct ocfs2_dinode *) bh->b_data;
1144 di->i_mode = cpu_to_le16(inode->i_mode);
1145
1146 ret = ocfs2_journal_dirty(handle, bh);
1147 if (ret < 0)
1148 mlog_errno(ret);
1149
1150 out_trans:
1151 ocfs2_commit_trans(osb, handle);
1152 out:
1153 mlog_exit(ret);
1154 return ret;
1155 }
1156
1157 /*
1158 * Will look for holes and unwritten extents in the range starting at
1159 * pos for count bytes (inclusive).
1160 */
1161 static int ocfs2_check_range_for_holes(struct inode *inode, loff_t pos,
1162 size_t count)
1163 {
1164 int ret = 0;
1165 unsigned int extent_flags;
1166 u32 cpos, clusters, extent_len, phys_cpos;
1167 struct super_block *sb = inode->i_sb;
1168
1169 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
1170 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
1171
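	/*
	 * Walk the extent map one extent at a time; report 1 as soon as
	 * we hit a hole (no physical allocation) or an unwritten extent.
	 */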
1172 while (clusters) {
1173 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
1174 &extent_flags);
1175 if (ret < 0) {
1176 mlog_errno(ret);
1177 goto out;
1178 }
1179
1180 if (phys_cpos == 0 || (extent_flags & OCFS2_EXT_UNWRITTEN)) {
1181 ret = 1;
1182 break;
1183 }
1184
1185 if (extent_len > clusters)
1186 extent_len = clusters;
1187
1188 clusters -= extent_len;
1189 cpos += extent_len;
1190 }
1191 out:
1192 return ret;
1193 }
1194
1195 static int ocfs2_write_remove_suid(struct inode *inode)
1196 {
1197 int ret;
1198 struct buffer_head *bh = NULL;
1199 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1200
1201 ret = ocfs2_read_block(OCFS2_SB(inode->i_sb),
1202 oi->ip_blkno, &bh, OCFS2_BH_CACHED, inode);
1203 if (ret < 0) {
1204 mlog_errno(ret);
1205 goto out;
1206 }
1207
1208 ret = __ocfs2_write_remove_suid(inode, bh);
1209 out:
1210 brelse(bh);
1211 return ret;
1212 }
1213
1214 /*
1215 * Allocate enough extents to cover the region starting at byte offset
1216 * start for len bytes. Existing extents are skipped, any extents
1217 * added are marked as "unwritten".
1218 */
1219 static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1220 u64 start, u64 len)
1221 {
1222 int ret;
1223 u32 cpos, phys_cpos, clusters, alloc_size;
1224
1225 /*
1226 * We consider both start and len to be inclusive.
1227 */
1228 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1229 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1230 clusters -= cpos;
1231
1232 while (clusters) {
1233 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1234 &alloc_size, NULL);
1235 if (ret) {
1236 mlog_errno(ret);
1237 goto out;
1238 }
1239
1240 /*
1241 * Hole or existing extent len can be arbitrary, so
1242 * cap it to our own allocation request.
1243 */
1244 if (alloc_size > clusters)
1245 alloc_size = clusters;
1246
1247 if (phys_cpos) {
1248 /*
1249 * We already have an allocation at this
1250 * region so we can safely skip it.
1251 */
1252 goto next;
1253 }
1254
1255 ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1256 if (ret) {
1257 if (ret != -ENOSPC)
1258 mlog_errno(ret);
1259 goto out;
1260 }
1261
1262 next:
1263 cpos += alloc_size;
1264 clusters -= alloc_size;
1265 }
1266
1267 ret = 0;
1268 out:
1269 return ret;
1270 }
1271
1272 static int __ocfs2_remove_inode_range(struct inode *inode,
1273 struct buffer_head *di_bh,
1274 u32 cpos, u32 phys_cpos, u32 len,
1275 struct ocfs2_cached_dealloc_ctxt *dealloc)
1276 {
1277 int ret;
1278 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
1279 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1280 struct inode *tl_inode = osb->osb_tl_inode;
1281 handle_t *handle;
1282 struct ocfs2_alloc_context *meta_ac = NULL;
1283 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1284
1285 ret = ocfs2_lock_allocators(inode, di, 0, 1, NULL, &meta_ac);
1286 if (ret) {
1287 mlog_errno(ret);
1288 return ret;
1289 }
1290
1291 mutex_lock(&tl_inode->i_mutex);
1292
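	/*
	 * The truncate log only holds a limited number of records; flush
	 * it now, before starting the transaction below, so the append
	 * further down has guaranteed room.
	 */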
1293 if (ocfs2_truncate_log_needs_flush(osb)) {
1294 ret = __ocfs2_flush_truncate_log(osb);
1295 if (ret < 0) {
1296 mlog_errno(ret);
1297 goto out;
1298 }
1299 }
1300
1301 handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS);
1302 if (handle == NULL) {
1303 ret = -ENOMEM;
1304 mlog_errno(ret);
1305 goto out;
1306 }
1307
1308 ret = ocfs2_journal_access(handle, inode, di_bh,
1309 OCFS2_JOURNAL_ACCESS_WRITE);
1310 if (ret) {
1311 mlog_errno(ret);
1312 goto out;
1313 }
1314
1315 ret = ocfs2_remove_extent(inode, di_bh, cpos, len, handle, meta_ac,
1316 dealloc);
1317 if (ret) {
1318 mlog_errno(ret);
1319 goto out_commit;
1320 }
1321
1322 OCFS2_I(inode)->ip_clusters -= len;
1323 di->i_clusters = cpu_to_le32(OCFS2_I(inode)->ip_clusters);
1324
1325 ret = ocfs2_journal_dirty(handle, di_bh);
1326 if (ret) {
1327 mlog_errno(ret);
1328 goto out_commit;
1329 }
1330
1331 ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
1332 if (ret)
1333 mlog_errno(ret);
1334
1335 out_commit:
1336 ocfs2_commit_trans(osb, handle);
1337 out:
1338 mutex_unlock(&tl_inode->i_mutex);
1339
1340 if (meta_ac)
1341 ocfs2_free_alloc_context(meta_ac);
1342
1343 return ret;
1344 }
1345
1346 /*
1347 * Truncate a byte range, avoiding pages within partial clusters. This
1348 * preserves those pages for the zeroing code to write to.
1349 */
1350 static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1351 u64 byte_len)
1352 {
1353 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1354 loff_t start, end;
1355 struct address_space *mapping = inode->i_mapping;
1356
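	/*
	 * Round the start up and the end down to cluster boundaries, so
	 * only pages covering whole clusters inside the range are dropped;
	 * partial edge clusters stay cached for the zeroing code.
	 */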
1357 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1358 end = byte_start + byte_len;
1359 end = end & ~(osb->s_clustersize - 1);
1360
1361 if (start < end) {
1362 unmap_mapping_range(mapping, start, end - start, 0);
1363 truncate_inode_pages_range(mapping, start, end - 1);
1364 }
1365 }
1366
1367 static int ocfs2_zero_partial_clusters(struct inode *inode,
1368 u64 start, u64 len)
1369 {
1370 int ret = 0;
1371 u64 tmpend, end = start + len;
1372 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1373 unsigned int csize = osb->s_clustersize;
1374 handle_t *handle;
1375
1376 /*
1377 * The "start" and "end" values are NOT necessarily part of
1378 * the range whose allocation is being deleted. Rather, this
1379 * is what the user passed in with the request. We must zero
1380 * partial clusters here. There's no need to worry about
1381 * physical allocation - the zeroing code knows to skip holes.
1382 */
1383 mlog(0, "byte start: %llu, end: %llu\n",
1384 (unsigned long long)start, (unsigned long long)end);
1385
1386 /*
1387 * If both edges are on a cluster boundary then there's no
1388 * zeroing required as the region is part of the allocation to
1389 * be truncated.
1390 */
1391 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1392 goto out;
1393
1394 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1395 if (handle == NULL) {
1396 ret = -ENOMEM;
1397 mlog_errno(ret);
1398 goto out;
1399 }
1400
1401 /*
1402 * We want to get the byte offset of the end of the 1st cluster.
1403 */
1404 tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
1405 if (tmpend > end)
1406 tmpend = end;
1407
1408 mlog(0, "1st range: start: %llu, tmpend: %llu\n",
1409 (unsigned long long)start, (unsigned long long)tmpend);
1410
1411 ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
1412 if (ret)
1413 mlog_errno(ret);
1414
1415 if (tmpend < end) {
1416 /*
1417 * This may make start and end equal, but the zeroing
1418 * code will skip any work in that case so there's no
1419 * need to catch it up here.
1420 */
1421 start = end & ~(osb->s_clustersize - 1);
1422
1423 mlog(0, "2nd range: start: %llu, end: %llu\n",
1424 (unsigned long long)start, (unsigned long long)end);
1425
1426 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1427 if (ret)
1428 mlog_errno(ret);
1429 }
1430
1431 ocfs2_commit_trans(osb, handle);
1432 out:
1433 return ret;
1434 }
1435
1436 static int ocfs2_remove_inode_range(struct inode *inode,
1437 struct buffer_head *di_bh, u64 byte_start,
1438 u64 byte_len)
1439 {
1440 int ret = 0;
1441 u32 trunc_start, trunc_len, cpos, phys_cpos, alloc_size;
1442 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1443 struct ocfs2_cached_dealloc_ctxt dealloc;
1444
1445 ocfs2_init_dealloc_ctxt(&dealloc);
1446
1447 if (byte_len == 0)
1448 return 0;
1449
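	/*
	 * trunc_start is the first cluster wholly past byte_start and
	 * trunc_len the count of whole clusters inside the range; the
	 * partial edge clusters are zeroed below rather than deallocated.
	 */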
1450 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1451 trunc_len = (byte_start + byte_len) >> osb->s_clustersize_bits;
1452 if (trunc_len >= trunc_start)
1453 trunc_len -= trunc_start;
1454 else
1455 trunc_len = 0;
1456
1457 mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u\n",
1458 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1459 (unsigned long long)byte_start,
1460 (unsigned long long)byte_len, trunc_start, trunc_len);
1461
1462 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1463 if (ret) {
1464 mlog_errno(ret);
1465 goto out;
1466 }
1467
1468 cpos = trunc_start;
1469 while (trunc_len) {
1470 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1471 &alloc_size, NULL);
1472 if (ret) {
1473 mlog_errno(ret);
1474 goto out;
1475 }
1476
1477 if (alloc_size > trunc_len)
1478 alloc_size = trunc_len;
1479
1480 /* Only do work for non-holes */
1481 if (phys_cpos != 0) {
1482 ret = __ocfs2_remove_inode_range(inode, di_bh, cpos,
1483 phys_cpos, alloc_size,
1484 &dealloc);
1485 if (ret) {
1486 mlog_errno(ret);
1487 goto out;
1488 }
1489 }
1490
1491 cpos += alloc_size;
1492 trunc_len -= alloc_size;
1493 }
1494
1495 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1496
1497 out:
1498 ocfs2_schedule_truncate_log_flush(osb, 1);
1499 ocfs2_run_deallocs(osb, &dealloc);
1500
1501 return ret;
1502 }
1503
1504 /*
1505 * Parts of this function taken from xfs_change_file_space()
1506 */
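/*
 * Illustrative sketch of how userspace drives this path through the
 * reservation ioctls (struct and ioctl names as declared for
 * struct ocfs2_space_resv; not part of this file):
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,			// SEEK_SET
 *		.l_start  = 0,
 *		.l_len    = 1024 * 1024,	// reserve 1MB of unwritten extents
 *	};
 *	if (ioctl(fd, OCFS2_IOC_RESVSP64, &sr) < 0)
 *		perror("OCFS2_IOC_RESVSP64");
 */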
1507 int ocfs2_change_file_space(struct file *file, unsigned int cmd,
1508 struct ocfs2_space_resv *sr)
1509 {
1510 int ret;
1511 s64 llen;
1512 struct inode *inode = file->f_path.dentry->d_inode;
1513 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1514 struct buffer_head *di_bh = NULL;
1515 handle_t *handle;
1516 unsigned long long max_off = ocfs2_max_file_offset(inode->i_sb->s_blocksize_bits);
1517
1518 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
1519 !ocfs2_writes_unwritten_extents(osb))
1520 return -ENOTTY;
1521 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
1522 !ocfs2_sparse_alloc(osb))
1523 return -ENOTTY;
1524
1525 if (!S_ISREG(inode->i_mode))
1526 return -EINVAL;
1527
1528 if (!(file->f_mode & FMODE_WRITE))
1529 return -EBADF;
1530
1531 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1532 return -EROFS;
1533
1534 mutex_lock(&inode->i_mutex);
1535
1536 /*
1537 * This prevents concurrent writes on other nodes
1538 */
1539 ret = ocfs2_rw_lock(inode, 1);
1540 if (ret) {
1541 mlog_errno(ret);
1542 goto out;
1543 }
1544
1545 ret = ocfs2_meta_lock(inode, &di_bh, 1);
1546 if (ret) {
1547 mlog_errno(ret);
1548 goto out_rw_unlock;
1549 }
1550
1551 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1552 ret = -EPERM;
1553 goto out_meta_unlock;
1554 }
1555
1556 switch (sr->l_whence) {
1557 case 0: /*SEEK_SET*/
1558 break;
1559 case 1: /*SEEK_CUR*/
1560 sr->l_start += file->f_pos;
1561 break;
1562 case 2: /*SEEK_END*/
1563 sr->l_start += i_size_read(inode);
1564 break;
1565 default:
1566 ret = -EINVAL;
1567 goto out_meta_unlock;
1568 }
1569 sr->l_whence = 0;
1570
1571 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1572
1573 if (sr->l_start < 0
1574 || sr->l_start > max_off
1575 || (sr->l_start + llen) < 0
1576 || (sr->l_start + llen) > max_off) {
1577 ret = -EINVAL;
1578 goto out_meta_unlock;
1579 }
1580
1581 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) {
1582 if (sr->l_len <= 0) {
1583 ret = -EINVAL;
1584 goto out_meta_unlock;
1585 }
1586 }
1587
1588 if (should_remove_suid(file->f_path.dentry)) {
1589 ret = __ocfs2_write_remove_suid(inode, di_bh);
1590 if (ret) {
1591 mlog_errno(ret);
1592 goto out_meta_unlock;
1593 }
1594 }
1595
1596 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1597 switch (cmd) {
1598 case OCFS2_IOC_RESVSP:
1599 case OCFS2_IOC_RESVSP64:
1600 /*
1601 * This takes unsigned offsets, but the signed ones we
1602 * pass have been checked against overflow above.
1603 */
1604 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
1605 sr->l_len);
1606 break;
1607 case OCFS2_IOC_UNRESVSP:
1608 case OCFS2_IOC_UNRESVSP64:
1609 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
1610 sr->l_len);
1611 break;
1612 default:
1613 ret = -EINVAL;
1614 }
1615 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1616 if (ret) {
1617 mlog_errno(ret);
1618 goto out_meta_unlock;
1619 }
1620
1621 /*
1622 * We update c/mtime for these changes
1623 */
1624 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1625 if (IS_ERR(handle)) {
1626 ret = PTR_ERR(handle);
1627 mlog_errno(ret);
1628 goto out_meta_unlock;
1629 }
1630
1631 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1632 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
1633 if (ret < 0)
1634 mlog_errno(ret);
1635
1636 ocfs2_commit_trans(osb, handle);
1637
1638 out_meta_unlock:
1639 brelse(di_bh);
1640 ocfs2_meta_unlock(inode, 1);
1641 out_rw_unlock:
1642 ocfs2_rw_unlock(inode, 1);
1643
1644 mutex_unlock(&inode->i_mutex);
1645 out:
1646 return ret;
1647 }
1648
1649 static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
1650 loff_t *ppos,
1651 size_t count,
1652 int appending,
1653 int *direct_io)
1654 {
1655 int ret = 0, meta_level = appending;
1656 struct inode *inode = dentry->d_inode;
1657 u32 clusters;
1658 loff_t newsize, saved_pos;
1659
1660 /*
1661 * We sample i_size under a read level meta lock to see if our write
1662 * is extending the file, if it is we back off and get a write level
1663 * meta lock.
1664 */
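	/*
	 * meta_level 0 is a shared (read) cluster lock, 1 is exclusive;
	 * we start shared unless appending and escalate to exclusive when
	 * suid/sgid bits need clearing or, on non-sparse file systems,
	 * when the write extends past i_size.
	 */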
1665 for(;;) {
1666 ret = ocfs2_meta_lock(inode, NULL, meta_level);
1667 if (ret < 0) {
1668 meta_level = -1;
1669 mlog_errno(ret);
1670 goto out;
1671 }
1672
1673 /* Clear suid / sgid if necessary. We do this here
1674 * instead of later in the write path because
1675 * remove_suid() calls ->setattr without any hint that
1676 * we may have already done our cluster locking. Since
1677 * ocfs2_setattr() *must* take cluster locks to
1678          * proceed, this will lead us to recursively lock the
1679 * inode. There's also the dinode i_size state which
1680 * can be lost via setattr during extending writes (we
1681          * set inode->i_size at the end of a write). */
1682 if (should_remove_suid(dentry)) {
1683 if (meta_level == 0) {
1684 ocfs2_meta_unlock(inode, meta_level);
1685 meta_level = 1;
1686 continue;
1687 }
1688
1689 ret = ocfs2_write_remove_suid(inode);
1690 if (ret < 0) {
1691 mlog_errno(ret);
1692 goto out_unlock;
1693 }
1694 }
1695
1696 /* work on a copy of ppos until we're sure that we won't have
1697 * to recalculate it due to relocking. */
1698 if (appending) {
1699 saved_pos = i_size_read(inode);
1700 mlog(0, "O_APPEND: inode->i_size=%llu\n", saved_pos);
1701 } else {
1702 saved_pos = *ppos;
1703 }
1704
1705 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb))) {
1706 loff_t end = saved_pos + count;
1707
1708 /*
1709 * Skip the O_DIRECT checks if we don't need
1710 * them.
1711 */
1712 if (!direct_io || !(*direct_io))
1713 break;
1714
1715 /*
1716 * Allowing concurrent direct writes means
1717 * i_size changes wouldn't be synchronized, so
1718 * one node could wind up truncating another
1719                          * node's writes.
1720 */
1721 if (end > i_size_read(inode)) {
1722 *direct_io = 0;
1723 break;
1724 }
1725
1726 /*
1727 * We don't fill holes during direct io, so
1728 * check for them here. If any are found, the
1729 * caller will have to retake some cluster
1730 * locks and initiate the io as buffered.
1731 */
1732 ret = ocfs2_check_range_for_holes(inode, saved_pos,
1733 count);
1734 if (ret == 1) {
1735 *direct_io = 0;
1736 ret = 0;
1737 } else if (ret < 0)
1738 mlog_errno(ret);
1739 break;
1740 }
1741
1742 /*
1743 * The rest of this loop is concerned with legacy file
1744 * systems which don't support sparse files.
1745 */
1746
1747 newsize = count + saved_pos;
1748
1749 mlog(0, "pos=%lld newsize=%lld cursize=%lld\n",
1750 (long long) saved_pos, (long long) newsize,
1751 (long long) i_size_read(inode));
1752
1753 /* No need for a higher level metadata lock if we're
1754 * never going past i_size. */
1755 if (newsize <= i_size_read(inode))
1756 break;
1757
1758 if (meta_level == 0) {
1759 ocfs2_meta_unlock(inode, meta_level);
1760 meta_level = 1;
1761 continue;
1762 }
1763
1764 spin_lock(&OCFS2_I(inode)->ip_lock);
1765 clusters = ocfs2_clusters_for_bytes(inode->i_sb, newsize) -
1766 OCFS2_I(inode)->ip_clusters;
1767 spin_unlock(&OCFS2_I(inode)->ip_lock);
1768
1769 mlog(0, "Writing at EOF, may need more allocation: "
1770 "i_size = %lld, newsize = %lld, need %u clusters\n",
1771 (long long) i_size_read(inode), (long long) newsize,
1772 clusters);
1773
1774 /* We only want to continue the rest of this loop if
1775 * our extend will actually require more
1776 * allocation. */
1777 if (!clusters)
1778 break;
1779
1780 ret = ocfs2_extend_file(inode, NULL, newsize, count);
1781 if (ret < 0) {
1782 if (ret != -ENOSPC)
1783 mlog_errno(ret);
1784 goto out_unlock;
1785 }
1786 break;
1787 }
1788
1789 if (appending)
1790 *ppos = saved_pos;
1791
1792 out_unlock:
1793 ocfs2_meta_unlock(inode, meta_level);
1794
1795 out:
1796 return ret;
1797 }
1798
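/* Advance the (iov, base) cursor by 'bytes', stepping into the next
 * iovec segment whenever the current one is exhausted. Used to resume
 * a buffered write after a partial O_DIRECT write. */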
1799 static inline void
1800 ocfs2_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
1801 {
1802 const struct iovec *iov = *iovp;
1803 size_t base = *basep;
1804
1805 do {
1806 int copy = min(bytes, iov->iov_len - base);
1807
1808 bytes -= copy;
1809 base += copy;
1810 if (iov->iov_len == base) {
1811 iov++;
1812 base = 0;
1813 }
1814 } while (bytes);
1815 *iovp = iov;
1816 *basep = base;
1817 }
1818
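/* Map the source buffer for the current iovec segment. For userspace
 * callers this pins and kmap()s the user page (released later via
 * ocfs2_put_write_source()); for KERNEL_DS callers the buffer is used
 * directly and NULL is returned. May return ERR_PTR(-EFAULT). */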
1819 static struct page * ocfs2_get_write_source(char **ret_src_buf,
1820 const struct iovec *cur_iov,
1821 size_t iov_offset)
1822 {
1823 int ret;
1824 char *buf = cur_iov->iov_base + iov_offset;
1825 struct page *src_page = NULL;
1826 unsigned long off;
1827
1828 off = (unsigned long)(buf) & ~PAGE_CACHE_MASK;
1829
1830 if (!segment_eq(get_fs(), KERNEL_DS)) {
1831 /*
1832 * Pull in the user page. We want to do this outside
1833 * of the meta data locks in order to preserve locking
1834 * order in case of page fault.
1835 */
1836 ret = get_user_pages(current, current->mm,
1837 (unsigned long)buf & PAGE_CACHE_MASK, 1,
1838 0, 0, &src_page, NULL);
1839 if (ret == 1)
1840 *ret_src_buf = kmap(src_page) + off;
1841 else
1842 src_page = ERR_PTR(-EFAULT);
1843 } else {
1844 *ret_src_buf = buf;
1845 }
1846
1847 return src_page;
1848 }
1849
1850 static void ocfs2_put_write_source(struct page *page)
1851 {
1852 if (page) {
1853 kunmap(page);
1854 page_cache_release(page);
1855 }
1856 }
1857
1858 static ssize_t ocfs2_file_buffered_write(struct file *file, loff_t *ppos,
1859 const struct iovec *iov,
1860 unsigned long nr_segs,
1861 size_t count,
1862 ssize_t o_direct_written)
1863 {
1864 int ret = 0;
1865 ssize_t copied, total = 0;
1866 size_t iov_offset = 0, bytes;
1867 loff_t pos;
1868 const struct iovec *cur_iov = iov;
1869 struct page *user_page, *page;
1870 char *buf, *dst;
1871 void *fsdata;
1872
1873 /*
1874 * handle partial DIO write. Adjust cur_iov if needed.
1875 */
1876 ocfs2_set_next_iovec(&cur_iov, &iov_offset, o_direct_written);
1877
1878 do {
1879 pos = *ppos;
1880
1881 user_page = ocfs2_get_write_source(&buf, cur_iov, iov_offset);
1882 if (IS_ERR(user_page)) {
1883 ret = PTR_ERR(user_page);
1884 goto out;
1885 }
1886
1887 /* Stay within our page boundaries */
1888 bytes = min((PAGE_CACHE_SIZE - ((unsigned long)pos & ~PAGE_CACHE_MASK)),
1889 (PAGE_CACHE_SIZE - ((unsigned long)buf & ~PAGE_CACHE_MASK)));
1890 /* Stay within the vector boundary */
1891 bytes = min_t(size_t, bytes, cur_iov->iov_len - iov_offset);
1892 /* Stay within count */
1893 bytes = min(bytes, count);
1894
1895 page = NULL;
1896 ret = ocfs2_write_begin(file, file->f_mapping, pos, bytes, 0,
1897 &page, &fsdata);
1898 if (ret) {
1899 mlog_errno(ret);
1900 goto out;
1901 }
1902
1903 dst = kmap_atomic(page, KM_USER0);
1904 memcpy(dst + (pos & (PAGE_CACHE_SIZE - 1)), buf, bytes);
1905 kunmap_atomic(dst, KM_USER0);
1906 flush_dcache_page(page);
1907 ocfs2_put_write_source(user_page);
1908
1909 copied = ocfs2_write_end(file, file->f_mapping, pos, bytes,
1910 bytes, page, fsdata);
1911 if (copied < 0) {
1912 mlog_errno(copied);
1913 ret = copied;
1914 goto out;
1915 }
1916
1917 total += copied;
1918 *ppos = pos + copied;
1919 count -= copied;
1920
1921 ocfs2_set_next_iovec(&cur_iov, &iov_offset, copied);
1922 } while(count);
1923
1924 out:
1925 return total ? total : ret;
1926 }
1927
1928 static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
1929 const struct iovec *iov,
1930 unsigned long nr_segs,
1931 loff_t pos)
1932 {
1933 int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
1934 int can_do_direct, sync = 0;
1935 ssize_t written = 0;
1936 size_t ocount; /* original count */
1937 size_t count; /* after file limit checks */
1938 loff_t *ppos = &iocb->ki_pos;
1939 struct file *file = iocb->ki_filp;
1940 struct inode *inode = file->f_path.dentry->d_inode;
1941
1942 mlog_entry("(0x%p, %u, '%.*s')\n", file,
1943 (unsigned int)nr_segs,
1944 file->f_path.dentry->d_name.len,
1945 file->f_path.dentry->d_name.name);
1946
1947 if (iocb->ki_left == 0)
1948 return 0;
1949
1950 ret = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
1951 if (ret)
1952 return ret;
1953
1954 count = ocount;
1955
1956 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1957
1958 appending = file->f_flags & O_APPEND ? 1 : 0;
1959 direct_io = file->f_flags & O_DIRECT ? 1 : 0;
1960
1961 mutex_lock(&inode->i_mutex);
1962
1963 relock:
1964 /* to match setattr's i_mutex -> i_alloc_sem -> rw_lock ordering */
1965 if (direct_io) {
1966 down_read(&inode->i_alloc_sem);
1967 have_alloc_sem = 1;
1968 }
1969
1970 /* concurrent O_DIRECT writes are allowed */
1971 rw_level = !direct_io;
1972 ret = ocfs2_rw_lock(inode, rw_level);
1973 if (ret < 0) {
1974 mlog_errno(ret);
1975 goto out_sems;
1976 }
1977
1978 can_do_direct = direct_io;
1979 ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
1980 iocb->ki_left, appending,
1981 &can_do_direct);
1982 if (ret < 0) {
1983 mlog_errno(ret);
1984 goto out;
1985 }
1986
1987 /*
1988 * We can't complete the direct I/O as requested, fall back to
1989 * buffered I/O.
1990 */
1991 if (direct_io && !can_do_direct) {
1992 ocfs2_rw_unlock(inode, rw_level);
1993 up_read(&inode->i_alloc_sem);
1994
1995 have_alloc_sem = 0;
1996 rw_level = -1;
1997
1998 direct_io = 0;
1999 sync = 1;
2000 goto relock;
2001 }
2002
2003 if (!sync && ((file->f_flags & O_SYNC) || IS_SYNC(inode)))
2004 sync = 1;
2005
2006 /*
2007 * XXX: Is it ok to execute these checks a second time?
2008 */
2009 ret = generic_write_checks(file, ppos, &count, S_ISBLK(inode->i_mode));
2010 if (ret)
2011 goto out;
2012
2013 /*
2014 * Set pos so that sync_page_range_nolock() below understands
2015 * where to start from. We might've moved it around via the
2016 * calls above. The range we want to actually sync starts from
2017 * *ppos here.
2018 *
2019 */
2020 pos = *ppos;
2021
2022 /* communicate with ocfs2_dio_end_io */
2023 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2024
2025 if (direct_io) {
2026 written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
2027 ppos, count, ocount);
2028 if (written < 0) {
2029 ret = written;
2030 goto out_dio;
2031 }
2032 } else {
2033 written = ocfs2_file_buffered_write(file, ppos, iov, nr_segs,
2034 count, written);
2035 if (written < 0) {
2036 ret = written;
2037                         if (ret != -EFAULT && ret != -ENOSPC)
2038 mlog_errno(ret);
2039 goto out;
2040 }
2041 }
2042
2043 out_dio:
2044 /* buffered aio wouldn't have proper lock coverage today */
2045 BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
2046
2047 /*
2048 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
2049 * function pointer which is called when o_direct io completes so that
2050 * it can unlock our rw lock. (it's the clustered equivalent of
2051 * i_alloc_sem; protects truncate from racing with pending ios).
2052 * Unfortunately there are error cases which call end_io and others
2053          * that don't, so we don't have to unlock the rw_lock if either an
2054 * async dio is going to do it in the future or an end_io after an
2055 * error has already done it.
2056 */
2057 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2058 rw_level = -1;
2059 have_alloc_sem = 0;
2060 }
2061
2062 out:
2063 if (rw_level != -1)
2064 ocfs2_rw_unlock(inode, rw_level);
2065
2066 out_sems:
2067 if (have_alloc_sem)
2068 up_read(&inode->i_alloc_sem);
2069
2070 if (written > 0 && sync) {
2071 ssize_t err;
2072
2073 err = sync_page_range_nolock(inode, file->f_mapping, pos, count);
2074 if (err < 0)
2075 written = err;
2076 }
2077
2078 mutex_unlock(&inode->i_mutex);
2079
2080 mlog_exit(ret);
2081 return written ? written : ret;
2082 }
2083
2084 static int ocfs2_splice_write_actor(struct pipe_inode_info *pipe,
2085 struct pipe_buffer *buf,
2086 struct splice_desc *sd)
2087 {
2088 int ret, count;
2089 ssize_t copied = 0;
2090 struct file *file = sd->u.file;
2091 unsigned int offset;
2092 struct page *page = NULL;
2093 void *fsdata;
2094 char *src, *dst;
2095
2096 ret = buf->ops->confirm(pipe, buf);
2097 if (ret)
2098 goto out;
2099
2100 offset = sd->pos & ~PAGE_CACHE_MASK;
2101 count = sd->len;
2102 if (count + offset > PAGE_CACHE_SIZE)
2103 count = PAGE_CACHE_SIZE - offset;
2104
2105 ret = ocfs2_write_begin(file, file->f_mapping, sd->pos, count, 0,
2106 &page, &fsdata);
2107 if (ret) {
2108 mlog_errno(ret);
2109 goto out;
2110 }
2111
2112 src = buf->ops->map(pipe, buf, 1);
2113 dst = kmap_atomic(page, KM_USER1);
2114 memcpy(dst + offset, src + buf->offset, count);
2115 kunmap_atomic(page, KM_USER1);
2116 buf->ops->unmap(pipe, buf, src);
2117
2118 copied = ocfs2_write_end(file, file->f_mapping, sd->pos, count, count,
2119 page, fsdata);
2120 if (copied < 0) {
2121 mlog_errno(copied);
2122 ret = copied;
2123 goto out;
2124 }
2125 out:
2126
2127 return copied ? copied : ret;
2128 }
2129
2130 static ssize_t __ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2131 struct file *out,
2132 loff_t *ppos,
2133 size_t len,
2134 unsigned int flags)
2135 {
2136 int ret, err;
2137 struct address_space *mapping = out->f_mapping;
2138 struct inode *inode = mapping->host;
2139 struct splice_desc sd = {
2140 .total_len = len,
2141 .flags = flags,
2142 .pos = *ppos,
2143 .u.file = out,
2144 };
2145
2146 ret = __splice_from_pipe(pipe, &sd, ocfs2_splice_write_actor);
2147 if (ret > 0) {
2148 *ppos += ret;
2149
2150 if (unlikely((out->f_flags & O_SYNC) || IS_SYNC(inode))) {
2151 err = generic_osync_inode(inode, mapping,
2152 OSYNC_METADATA|OSYNC_DATA);
2153 if (err)
2154 ret = err;
2155 }
2156 }
2157
2158 return ret;
2159 }
2160
2161 static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
2162 struct file *out,
2163 loff_t *ppos,
2164 size_t len,
2165 unsigned int flags)
2166 {
2167 int ret;
2168 struct inode *inode = out->f_path.dentry->d_inode;
2169
2170 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", out, pipe,
2171 (unsigned int)len,
2172 out->f_path.dentry->d_name.len,
2173 out->f_path.dentry->d_name.name);
2174
2175 inode_double_lock(inode, pipe->inode);
2176
2177 ret = ocfs2_rw_lock(inode, 1);
2178 if (ret < 0) {
2179 mlog_errno(ret);
2180 goto out;
2181 }
2182
2183 ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, ppos, len, 0,
2184 NULL);
2185 if (ret < 0) {
2186 mlog_errno(ret);
2187 goto out_unlock;
2188 }
2189
2190 /* ok, we're done with i_size and alloc work */
2191 ret = __ocfs2_file_splice_write(pipe, out, ppos, len, flags);
2192
2193 out_unlock:
2194 ocfs2_rw_unlock(inode, 1);
2195 out:
2196 inode_double_unlock(inode, pipe->inode);
2197
2198 mlog_exit(ret);
2199 return ret;
2200 }
2201
2202 static ssize_t ocfs2_file_splice_read(struct file *in,
2203 loff_t *ppos,
2204 struct pipe_inode_info *pipe,
2205 size_t len,
2206 unsigned int flags)
2207 {
2208 int ret = 0;
2209 struct inode *inode = in->f_path.dentry->d_inode;
2210
2211 mlog_entry("(0x%p, 0x%p, %u, '%.*s')\n", in, pipe,
2212 (unsigned int)len,
2213 in->f_path.dentry->d_name.len,
2214 in->f_path.dentry->d_name.name);
2215
2216 /*
2217 * See the comment in ocfs2_file_aio_read()
2218 */
2219 ret = ocfs2_meta_lock(inode, NULL, 0);
2220 if (ret < 0) {
2221 mlog_errno(ret);
2222 goto bail;
2223 }
2224 ocfs2_meta_unlock(inode, 0);
2225
2226 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2227
2228 bail:
2229 mlog_exit(ret);
2230 return ret;
2231 }
2232
2233 static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
2234 const struct iovec *iov,
2235 unsigned long nr_segs,
2236 loff_t pos)
2237 {
2238 int ret = 0, rw_level = -1, have_alloc_sem = 0, lock_level = 0;
2239 struct file *filp = iocb->ki_filp;
2240 struct inode *inode = filp->f_path.dentry->d_inode;
2241
2242 mlog_entry("(0x%p, %u, '%.*s')\n", filp,
2243 (unsigned int)nr_segs,
2244 filp->f_path.dentry->d_name.len,
2245 filp->f_path.dentry->d_name.name);
2246
2247 if (!inode) {
2248 ret = -EINVAL;
2249 mlog_errno(ret);
2250 goto bail;
2251 }
2252
2253 /*
2254 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2255 * need locks to protect pending reads from racing with truncate.
2256 */
2257 if (filp->f_flags & O_DIRECT) {
2258 down_read(&inode->i_alloc_sem);
2259 have_alloc_sem = 1;
2260
2261 ret = ocfs2_rw_lock(inode, 0);
2262 if (ret < 0) {
2263 mlog_errno(ret);
2264 goto bail;
2265 }
2266 rw_level = 0;
2267 /* communicate with ocfs2_dio_end_io */
2268 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2269 }
2270
2271 /*
2272 * We're fine letting folks race truncates and extending
2273 * writes with read across the cluster, just like they can
2274 * locally. Hence no rw_lock during read.
2275 *
2276 * Take and drop the meta data lock to update inode fields
2277 * like i_size. This allows the checks down below
2278 * generic_file_aio_read() a chance of actually working.
2279 */
2280 ret = ocfs2_meta_lock_atime(inode, filp->f_vfsmnt, &lock_level);
2281 if (ret < 0) {
2282 mlog_errno(ret);
2283 goto bail;
2284 }
2285 ocfs2_meta_unlock(inode, lock_level);
2286
2287 ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
2288 if (ret == -EINVAL)
2289 mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
2290
2291 /* buffered aio wouldn't have proper lock coverage today */
2292 BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
2293
2294 /* see ocfs2_file_aio_write */
2295 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2296 rw_level = -1;
2297 have_alloc_sem = 0;
2298 }
2299
2300 bail:
2301 if (have_alloc_sem)
2302 up_read(&inode->i_alloc_sem);
2303 if (rw_level != -1)
2304 ocfs2_rw_unlock(inode, rw_level);
2305 mlog_exit(ret);
2306
2307 return ret;
2308 }
2309
2310 const struct inode_operations ocfs2_file_iops = {
2311 .setattr = ocfs2_setattr,
2312 .getattr = ocfs2_getattr,
2313 .permission = ocfs2_permission,
2314 };
2315
2316 const struct inode_operations ocfs2_special_file_iops = {
2317 .setattr = ocfs2_setattr,
2318 .getattr = ocfs2_getattr,
2319 .permission = ocfs2_permission,
2320 };
2321
2322 const struct file_operations ocfs2_fops = {
2323 .read = do_sync_read,
2324 .write = do_sync_write,
2325 .mmap = ocfs2_mmap,
2326 .fsync = ocfs2_sync_file,
2327 .release = ocfs2_file_release,
2328 .open = ocfs2_file_open,
2329 .aio_read = ocfs2_file_aio_read,
2330 .aio_write = ocfs2_file_aio_write,
2331 .ioctl = ocfs2_ioctl,
2332 #ifdef CONFIG_COMPAT
2333 .compat_ioctl = ocfs2_compat_ioctl,
2334 #endif
2335 .splice_read = ocfs2_file_splice_read,
2336 .splice_write = ocfs2_file_splice_write,
2337 };
2338
2339 const struct file_operations ocfs2_dops = {
2340 .read = generic_read_dir,
2341 .readdir = ocfs2_readdir,
2342 .fsync = ocfs2_sync_file,
2343 .ioctl = ocfs2_ioctl,
2344 #ifdef CONFIG_COMPAT
2345 .compat_ioctl = ocfs2_compat_ioctl,
2346 #endif
2347 };