Commit | Line | Data
---|---|---
1da177e4 | 1 | /*
7b718769 | 2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc.
7b718769 | 3 | * All Rights Reserved.
1da177e4 | 4 | *
7b718769 | 5 | * This program is free software; you can redistribute it and/or
7b718769 | 6 | * modify it under the terms of the GNU General Public License as
1da177e4 | 7 | * published by the Free Software Foundation.
1da177e4 | 8 | *
7b718769 | 9 | * This program is distributed in the hope that it would be useful,
7b718769 | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
7b718769 | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
7b718769 | 12 | * GNU General Public License for more details.
1da177e4 | 13 | *
7b718769 | 14 | * You should have received a copy of the GNU General Public License
7b718769 | 15 | * along with this program; if not, write the Free Software Foundation,
7b718769 | 16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
1da177e4 | 17 | */
1da177e4 | 18 | #include "xfs.h" |
dda35b8f | 19 | #include "xfs_fs.h" |
a844f451 | 20 | #include "xfs_bit.h" |
1da177e4 | 21 | #include "xfs_log.h" |
a844f451 | 22 | #include "xfs_inum.h" |
1da177e4 | 23 | #include "xfs_sb.h" |
a844f451 | 24 | #include "xfs_ag.h" |
1da177e4 | 25 | #include "xfs_trans.h" |
1da177e4 | 26 | #include "xfs_mount.h"
1da177e4 | 27 | #include "xfs_bmap_btree.h"
1da177e4 | 28 | #include "xfs_alloc.h" |
1da177e4 | 29 | #include "xfs_dinode.h"
1da177e4 | 30 | #include "xfs_inode.h"
fd3200be | 31 | #include "xfs_inode_item.h" |
dda35b8f | 32 | #include "xfs_bmap.h" |
1da177e4 | 33 | #include "xfs_error.h" |
739bfb2a | 34 | #include "xfs_vnodeops.h" |
f999a5bf | 35 | #include "xfs_da_btree.h" |
ddcd856d | 36 | #include "xfs_ioctl.h" |
dda35b8f | 37 | #include "xfs_trace.h" |
1da177e4 | 38 |
1da177e4 | 39 | #include <linux/dcache.h>
2fe17c10 | 40 | #include <linux/falloc.h> |
1da177e4 | 41 | |
f0f37e2f | 42 | static const struct vm_operations_struct xfs_file_vm_ops; |
1da177e4 | 43 | |
487f84f3 | 44 | /*
487f84f3 | 45 | * Locking primitives for read and write IO paths to ensure we consistently use
487f84f3 | 46 | * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
487f84f3 | 47 | */
487f84f3 | 48 | static inline void
487f84f3 | 49 | xfs_rw_ilock(
487f84f3 | 50 | struct xfs_inode *ip,
487f84f3 | 51 | int type)
487f84f3 | 52 | {
487f84f3 | 53 | if (type & XFS_IOLOCK_EXCL)
487f84f3 | 54 | mutex_lock(&VFS_I(ip)->i_mutex);
487f84f3 | 55 | xfs_ilock(ip, type);
487f84f3 | 56 | }
487f84f3 | 57 |
487f84f3 | 58 | static inline void
487f84f3 | 59 | xfs_rw_iunlock(
487f84f3 | 60 | struct xfs_inode *ip,
487f84f3 | 61 | int type)
487f84f3 | 62 | {
487f84f3 | 63 | xfs_iunlock(ip, type);
487f84f3 | 64 | if (type & XFS_IOLOCK_EXCL)
487f84f3 | 65 | mutex_unlock(&VFS_I(ip)->i_mutex);
487f84f3 | 66 | }
487f84f3 | 67 |
487f84f3 | 68 | static inline void
487f84f3 | 69 | xfs_rw_ilock_demote(
487f84f3 | 70 | struct xfs_inode *ip,
487f84f3 | 71 | int type)
487f84f3 | 72 | {
487f84f3 | 73 | xfs_ilock_demote(ip, type);
487f84f3 | 74 | if (type & XFS_IOLOCK_EXCL)
487f84f3 | 75 | mutex_unlock(&VFS_I(ip)->i_mutex);
487f84f3 | 76 | }
487f84f3 | 77 |
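The three helpers above are used in pairs throughout this file; a minimal usage sketch follows (it mirrors the direct-read path further down, and is illustrative only, not additional code in xfs_file.c):

```c
/* Take the IO lock exclusive only while the page cache must be invalidated,
 * then demote it so other readers and direct IO can proceed. */
xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);		/* i_mutex first, then the iolock */
/* ... flush and invalidate cached pages ... */
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);	/* drops i_mutex, keeps iolock shared */
/* ... issue the IO under the shared iolock ... */
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
```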
dda35b8f CH |
78 | /* |
79 | * xfs_iozero | |
80 | * | |
81 | * xfs_iozero clears the specified range of buffer supplied, | |
82 | * and marks all the affected blocks as valid and modified. If | |
83 | * an affected block is not allocated, it will be allocated. If | |
84 | * an affected block is not completely overwritten, and is not | |
85 | * valid before the operation, it will be read from disk before | |
86 | * being partially zeroed. | |
87 | */ | |
88 | STATIC int | |
89 | xfs_iozero( | |
90 | struct xfs_inode *ip, /* inode */ | |
91 | loff_t pos, /* offset in file */ | |
92 | size_t count) /* size of data to zero */ | |
93 | { | |
94 | struct page *page; | |
95 | struct address_space *mapping; | |
96 | int status; | |
97 | ||
98 | mapping = VFS_I(ip)->i_mapping; | |
99 | do { | |
100 | unsigned offset, bytes; | |
101 | void *fsdata; | |
102 | ||
103 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ | |
104 | bytes = PAGE_CACHE_SIZE - offset; | |
105 | if (bytes > count) | |
106 | bytes = count; | |
107 | ||
108 | status = pagecache_write_begin(NULL, mapping, pos, bytes, | |
109 | AOP_FLAG_UNINTERRUPTIBLE, | |
110 | &page, &fsdata); | |
111 | if (status) | |
112 | break; | |
113 | ||
114 | zero_user(page, offset, bytes); | |
115 | ||
116 | status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, | |
117 | page, fsdata); | |
118 | WARN_ON(status <= 0); /* can't return less than zero! */ | |
119 | pos += bytes; | |
120 | count -= bytes; | |
121 | status = 0; | |
122 | } while (count); | |
123 | ||
124 | return (-status); | |
125 | } | |
126 | ||
fd3200be CH |
127 | STATIC int |
128 | xfs_file_fsync( | |
129 | struct file *file, | |
02c24a82 JB |
130 | loff_t start, |
131 | loff_t end, | |
fd3200be CH |
132 | int datasync) |
133 | { | |
7ea80859 CH |
134 | struct inode *inode = file->f_mapping->host; |
135 | struct xfs_inode *ip = XFS_I(inode); | |
a27a263b | 136 | struct xfs_mount *mp = ip->i_mount; |
fd3200be CH |
137 | struct xfs_trans *tp; |
138 | int error = 0; | |
139 | int log_flushed = 0; | |
140 | ||
cca28fb8 | 141 | trace_xfs_file_fsync(ip); |
fd3200be | 142 | |
02c24a82 JB |
143 | error = filemap_write_and_wait_range(inode->i_mapping, start, end); |
144 | if (error) | |
145 | return error; | |
146 | ||
a27a263b | 147 | if (XFS_FORCED_SHUTDOWN(mp)) |
fd3200be CH |
148 | return -XFS_ERROR(EIO); |
149 | ||
150 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | |
151 | ||
d1166ec7 | 152 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
37bc5743 | 153 | xfs_ioend_wait(ip); |
d1166ec7 | 154 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
37bc5743 | 155 | |
a27a263b CH |
156 | if (mp->m_flags & XFS_MOUNT_BARRIER) { |
157 | /* | |
158 | * If we have an RT and/or log subvolume we need to make sure | |
159 | * to flush the write cache the device used for file data | |
160 | * first. This is to ensure newly written file data make | |
161 | * it to disk before logging the new inode size in case of | |
162 | * an extending write. | |
163 | */ | |
164 | if (XFS_IS_REALTIME_INODE(ip)) | |
165 | xfs_blkdev_issue_flush(mp->m_rtdev_targp); | |
166 | else if (mp->m_logdev_targp != mp->m_ddev_targp) | |
167 | xfs_blkdev_issue_flush(mp->m_ddev_targp); | |
168 | } | |
169 | ||
fd3200be CH |
170 | /* |
171 | * We always need to make sure that the required inode state is safe on | |
172 | * disk. The inode might be clean but we still might need to force the | |
173 | * log because of committed transactions that haven't hit the disk yet. | |
174 | * Likewise, there could be unflushed non-transactional changes to the | |
175 | * inode core that have to go to disk and this requires us to issue | |
176 | * a synchronous transaction to capture these changes correctly. | |
177 | * | |
178 | * This code relies on the assumption that if the i_update_core field | |
179 | * of the inode is clear and the inode is unpinned then it is clean | |
180 | * and no action is required. | |
181 | */ | |
182 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
183 | ||
66d834ea | 184 | /*
66d834ea | 185 | * First check if the VFS inode is marked dirty. All the dirtying
66d834ea | 186 | * of non-transactional updates now goes through mark_inode_dirty*,
66d834ea | 187 | * which allows us to distinguish between pure timestamp updates
66d834ea | 188 | * and i_size updates which need to be caught for fdatasync.
66d834ea | 189 | * After that also check for the dirty state in the XFS inode, which
66d834ea | 190 | * might get cleared when the inode gets written out via the AIL
66d834ea | 191 | * or xfs_iflush_cluster.
66d834ea | 192 | */
7ea80859 CH |
193 | if (((inode->i_state & I_DIRTY_DATASYNC) || |
194 | ((inode->i_state & I_DIRTY_SYNC) && !datasync)) && | |
66d834ea | 195 | ip->i_update_core) { |
fd3200be CH |
196 | /* |
197 | * Kick off a transaction to log the inode core to get the | |
198 | * updates. The sync transaction will also force the log. | |
199 | */ | |
200 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
a27a263b | 201 | tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS); |
fd3200be | 202 | error = xfs_trans_reserve(tp, 0, |
a27a263b | 203 | XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0); |
fd3200be CH |
204 | if (error) { |
205 | xfs_trans_cancel(tp, 0); | |
206 | return -error; | |
207 | } | |
208 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
209 | ||
210 | /* | |
211 | * Note - it's possible that we might have pushed ourselves out | |
212 | * of the way during trans_reserve which would flush the inode. | |
213 | * But there's no guarantee that the inode buffer has actually | |
214 | * gone out yet (it's delwri). Plus the buffer could be pinned | |
215 | * anyway if it's part of an inode in another recent | |
216 | * transaction. So we play it safe and fire off the | |
217 | * transaction anyway. | |
218 | */ | |
898621d5 | 219 | xfs_trans_ijoin(tp, ip); |
fd3200be CH |
220 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
221 | xfs_trans_set_sync(tp); | |
222 | error = _xfs_trans_commit(tp, 0, &log_flushed); | |
223 | ||
224 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
225 | } else { | |
226 | /* | |
227 | * Timestamps/size haven't changed since last inode flush or | |
228 | * inode transaction commit. That means either nothing got | |
229 | * written or a transaction committed which caught the updates. | |
230 | * If the latter happened and the transaction hasn't hit the | |
231 | * disk yet, the inode will be still be pinned. If it is, | |
232 | * force the log. | |
233 | */ | |
fd3200be | 234 | if (xfs_ipincount(ip)) { |
a27a263b | 235 | error = _xfs_log_force_lsn(mp, |
024910cb CH |
236 | ip->i_itemp->ili_last_lsn, |
237 | XFS_LOG_SYNC, &log_flushed); | |
fd3200be | 238 | } |
024910cb | 239 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
fd3200be CH |
240 | } |
241 | ||
a27a263b CH |
242 | /* |
243 | * If we only have a single device, and the log force above was
244 | * a no-op we might have to flush the data device cache here. | |
245 | * This can only happen for fdatasync/O_DSYNC if we were overwriting | |
246 | * an already allocated file and thus do not have any metadata to | |
247 | * commit. | |
248 | */ | |
249 | if ((mp->m_flags & XFS_MOUNT_BARRIER) && | |
250 | mp->m_logdev_targp == mp->m_ddev_targp && | |
251 | !XFS_IS_REALTIME_INODE(ip) && | |
252 | !log_flushed) | |
253 | xfs_blkdev_issue_flush(mp->m_ddev_targp); | |
fd3200be CH |
254 | |
255 | return -error; | |
256 | } | |
257 | ||
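For context (not part of xfs_file.c): the `datasync` argument above reflects which system call userspace issued. A hedged userspace sketch with a made-up path, showing the two entry points that reach this handler:

```c
/* fdatasync() arrives with datasync == 1: pure timestamp dirtying need not
 * force the log, while a size-changing (extending) write still must. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] = { 0 };
	int fd = open("/mnt/xfs/testfile", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (write(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("write");
		return EXIT_FAILURE;
	}
	/* Data plus whatever metadata is needed to find it (e.g. new file size). */
	if (fdatasync(fd) < 0)
		perror("fdatasync");
	/* Data plus all metadata, timestamps included. */
	if (fsync(fd) < 0)
		perror("fsync");
	close(fd);
	return EXIT_SUCCESS;
}
```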
00258e36 CH |
258 | STATIC ssize_t |
259 | xfs_file_aio_read( | |
dda35b8f CH |
260 | struct kiocb *iocb, |
261 | const struct iovec *iovp, | |
00258e36 CH |
262 | unsigned long nr_segs, |
263 | loff_t pos) | |
dda35b8f CH |
264 | { |
265 | struct file *file = iocb->ki_filp; | |
266 | struct inode *inode = file->f_mapping->host; | |
00258e36 CH |
267 | struct xfs_inode *ip = XFS_I(inode); |
268 | struct xfs_mount *mp = ip->i_mount; | |
dda35b8f CH |
269 | size_t size = 0; |
270 | ssize_t ret = 0; | |
00258e36 | 271 | int ioflags = 0; |
dda35b8f CH |
272 | xfs_fsize_t n; |
273 | unsigned long seg; | |
274 | ||
dda35b8f CH |
275 | XFS_STATS_INC(xs_read_calls); |
276 | ||
00258e36 CH |
277 | BUG_ON(iocb->ki_pos != pos); |
278 | ||
279 | if (unlikely(file->f_flags & O_DIRECT)) | |
280 | ioflags |= IO_ISDIRECT; | |
281 | if (file->f_mode & FMODE_NOCMTIME) | |
282 | ioflags |= IO_INVIS; | |
283 | ||
dda35b8f | 284 | /* START copy & waste from filemap.c */ |
00258e36 | 285 | for (seg = 0; seg < nr_segs; seg++) { |
dda35b8f CH |
286 | const struct iovec *iv = &iovp[seg]; |
287 | ||
288 | /* | |
289 | * If any segment has a negative length, or the cumulative | |
290 | * length ever wraps negative then return -EINVAL. | |
291 | */ | |
292 | size += iv->iov_len; | |
293 | if (unlikely((ssize_t)(size|iv->iov_len) < 0)) | |
294 | return XFS_ERROR(-EINVAL); | |
295 | } | |
296 | /* END copy & waste from filemap.c */ | |
297 | ||
298 | if (unlikely(ioflags & IO_ISDIRECT)) { | |
299 | xfs_buftarg_t *target = | |
300 | XFS_IS_REALTIME_INODE(ip) ? | |
301 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
00258e36 | 302 | if ((iocb->ki_pos & target->bt_smask) || |
dda35b8f | 303 | (size & target->bt_smask)) { |
00258e36 CH |
304 | if (iocb->ki_pos == ip->i_size) |
305 | return 0; | |
dda35b8f CH |
306 | return -XFS_ERROR(EINVAL); |
307 | } | |
308 | } | |
309 | ||
00258e36 CH |
310 | n = XFS_MAXIOFFSET(mp) - iocb->ki_pos; |
311 | if (n <= 0 || size == 0) | |
dda35b8f CH |
312 | return 0; |
313 | ||
314 | if (n < size) | |
315 | size = n; | |
316 | ||
317 | if (XFS_FORCED_SHUTDOWN(mp)) | |
318 | return -EIO; | |
319 | ||
0c38a251 DC |
320 | /* |
321 | * Locking is a bit tricky here. If we take an exclusive lock | |
322 | * for direct IO, we effectively serialise all new concurrent | |
323 | * read IO to this file and block it behind IO that is currently in | |
324 | * progress because IO in progress holds the IO lock shared. We only | |
325 | * need to hold the lock exclusive to blow away the page cache, so | |
326 | * only take lock exclusively if the page cache needs invalidation. | |
327 | * This allows the normal direct IO case of no page cache pages to | |
328 | * proceed concurrently without serialisation.
329 | */ | |
330 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | |
331 | if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { | |
332 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | |
487f84f3 DC |
333 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); |
334 | ||
00258e36 CH |
335 | if (inode->i_mapping->nrpages) { |
336 | ret = -xfs_flushinval_pages(ip, | |
337 | (iocb->ki_pos & PAGE_CACHE_MASK), | |
338 | -1, FI_REMAPF_LOCKED); | |
487f84f3 DC |
339 | if (ret) { |
340 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); | |
341 | return ret; | |
342 | } | |
00258e36 | 343 | } |
487f84f3 | 344 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
0c38a251 | 345 | } |
dda35b8f | 346 | |
00258e36 | 347 | trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags); |
dda35b8f | 348 | |
00258e36 | 349 | ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos); |
dda35b8f CH |
350 | if (ret > 0) |
351 | XFS_STATS_ADD(xs_read_bytes, ret); | |
352 | ||
487f84f3 | 353 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
354 | return ret; |
355 | } | |
356 | ||
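An illustrative userspace counterpart of the alignment check above (assumptions: a 512-byte logical sector size and an invented path); a direct-IO read whose offset or length is not sector-aligned comes back from this function as EINVAL:

```c
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	void *buf;
	ssize_t n;
	int fd = open("/mnt/xfs/testfile", O_RDONLY | O_DIRECT);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* Buffer, offset and length all aligned to the (assumed) 512-byte sector. */
	if (posix_memalign(&buf, 512, 4096) != 0) {
		fprintf(stderr, "posix_memalign failed\n");
		return EXIT_FAILURE;
	}
	n = pread(fd, buf, 4096, 0);
	if (n < 0)
		perror("pread");
	else
		printf("read %zd bytes\n", n);
	free(buf);
	close(fd);
	return EXIT_SUCCESS;
}
```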
00258e36 CH |
357 | STATIC ssize_t |
358 | xfs_file_splice_read( | |
dda35b8f CH |
359 | struct file *infilp, |
360 | loff_t *ppos, | |
361 | struct pipe_inode_info *pipe, | |
362 | size_t count, | |
00258e36 | 363 | unsigned int flags) |
dda35b8f | 364 | { |
00258e36 | 365 | struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); |
00258e36 | 366 | int ioflags = 0; |
dda35b8f CH |
367 | ssize_t ret; |
368 | ||
369 | XFS_STATS_INC(xs_read_calls); | |
00258e36 CH |
370 | |
371 | if (infilp->f_mode & FMODE_NOCMTIME) | |
372 | ioflags |= IO_INVIS; | |
373 | ||
dda35b8f CH |
374 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
375 | return -EIO; | |
376 | ||
487f84f3 | 377 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); |
dda35b8f | 378 | |
dda35b8f CH |
379 | trace_xfs_file_splice_read(ip, count, *ppos, ioflags); |
380 | ||
381 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); | |
382 | if (ret > 0) | |
383 | XFS_STATS_ADD(xs_read_bytes, ret); | |
384 | ||
487f84f3 | 385 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
386 | return ret; |
387 | } | |
388 | ||
edafb6da DC |
389 | STATIC void |
390 | xfs_aio_write_isize_update( | |
391 | struct inode *inode, | |
392 | loff_t *ppos, | |
393 | ssize_t bytes_written) | |
394 | { | |
395 | struct xfs_inode *ip = XFS_I(inode); | |
396 | xfs_fsize_t isize = i_size_read(inode); | |
397 | ||
398 | if (bytes_written > 0) | |
399 | XFS_STATS_ADD(xs_write_bytes, bytes_written); | |
400 | ||
401 | if (unlikely(bytes_written < 0 && bytes_written != -EFAULT && | |
402 | *ppos > isize)) | |
403 | *ppos = isize; | |
404 | ||
405 | if (*ppos > ip->i_size) { | |
487f84f3 | 406 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL); |
edafb6da DC |
407 | if (*ppos > ip->i_size) |
408 | ip->i_size = *ppos; | |
487f84f3 | 409 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); |
edafb6da DC |
410 | } |
411 | } | |
412 | ||
4c5cfd1b DC |
413 | /* |
414 | * If this was a direct or synchronous I/O that failed (such as ENOSPC) then | |
25985edc | 415 | * part of the I/O may have been written to disk before the error occurred. In |
4c5cfd1b DC |
416 | * this case the on-disk file size may have been adjusted beyond the in-memory |
417 | * file size and now needs to be truncated back. | |
418 | */ | |
419 | STATIC void | |
420 | xfs_aio_write_newsize_update( | |
7271d243 DC |
421 | struct xfs_inode *ip, |
422 | xfs_fsize_t new_size) | |
4c5cfd1b | 423 | { |
7271d243 | 424 | if (new_size == ip->i_new_size) { |
487f84f3 | 425 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL); |
7271d243 DC |
426 | if (new_size == ip->i_new_size) |
427 | ip->i_new_size = 0; | |
4c5cfd1b DC |
428 | if (ip->i_d.di_size > ip->i_size) |
429 | ip->i_d.di_size = ip->i_size; | |
487f84f3 | 430 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); |
4c5cfd1b DC |
431 | } |
432 | } | |
433 | ||
487f84f3 DC |
434 | /* |
435 | * xfs_file_splice_write() does not use xfs_rw_ilock() because | |
436 | * generic_file_splice_write() takes the i_mutex itself. This, in theory, | |
437 | * could cause lock inversions between the aio_write path and the splice path
438 | * if someone is doing concurrent splice(2) based writes and write(2) based | |
439 | * writes to the same inode. The only real way to fix this is to re-implement | |
440 | * the generic code here with correct locking orders. | |
441 | */ | |
00258e36 CH |
442 | STATIC ssize_t |
443 | xfs_file_splice_write( | |
dda35b8f CH |
444 | struct pipe_inode_info *pipe, |
445 | struct file *outfilp, | |
446 | loff_t *ppos, | |
447 | size_t count, | |
00258e36 | 448 | unsigned int flags) |
dda35b8f | 449 | { |
dda35b8f | 450 | struct inode *inode = outfilp->f_mapping->host; |
00258e36 | 451 | struct xfs_inode *ip = XFS_I(inode); |
edafb6da | 452 | xfs_fsize_t new_size; |
00258e36 CH |
453 | int ioflags = 0; |
454 | ssize_t ret; | |
dda35b8f CH |
455 | |
456 | XFS_STATS_INC(xs_write_calls); | |
00258e36 CH |
457 | |
458 | if (outfilp->f_mode & FMODE_NOCMTIME) | |
459 | ioflags |= IO_INVIS; | |
460 | ||
dda35b8f CH |
461 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
462 | return -EIO; | |
463 | ||
464 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | |
465 | ||
dda35b8f CH |
466 | new_size = *ppos + count; |
467 | ||
468 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
469 | if (new_size > ip->i_size) | |
470 | ip->i_new_size = new_size; | |
471 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
472 | ||
473 | trace_xfs_file_splice_write(ip, count, *ppos, ioflags); | |
474 | ||
475 | ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); | |
dda35b8f | 476 | |
edafb6da | 477 | xfs_aio_write_isize_update(inode, ppos, ret); |
7271d243 | 478 | xfs_aio_write_newsize_update(ip, new_size); |
dda35b8f CH |
479 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
480 | return ret; | |
481 | } | |
482 | ||
483 | /* | |
484 | * This routine is called to handle zeroing any space in the last | |
485 | * block of the file that is beyond the EOF. We do this since the | |
486 | * size is being increased without writing anything to that block | |
487 | * and we don't want anyone to read the garbage on the disk. | |
488 | */ | |
489 | STATIC int /* error (positive) */ | |
490 | xfs_zero_last_block( | |
491 | xfs_inode_t *ip, | |
492 | xfs_fsize_t offset, | |
493 | xfs_fsize_t isize) | |
494 | { | |
495 | xfs_fileoff_t last_fsb; | |
496 | xfs_mount_t *mp = ip->i_mount; | |
497 | int nimaps; | |
498 | int zero_offset; | |
499 | int zero_len; | |
500 | int error = 0; | |
501 | xfs_bmbt_irec_t imap; | |
502 | ||
503 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); | |
504 | ||
505 | zero_offset = XFS_B_FSB_OFFSET(mp, isize); | |
506 | if (zero_offset == 0) { | |
507 | /* | |
508 | * There are no extra bytes in the last block on disk to | |
509 | * zero, so return. | |
510 | */ | |
511 | return 0; | |
512 | } | |
513 | ||
514 | last_fsb = XFS_B_TO_FSBT(mp, isize); | |
515 | nimaps = 1; | |
516 | error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, | |
b4e9181e | 517 | &nimaps, NULL); |
dda35b8f CH |
518 | if (error) { |
519 | return error; | |
520 | } | |
521 | ASSERT(nimaps > 0); | |
522 | /* | |
523 | * If the block underlying isize is just a hole, then there | |
524 | * is nothing to zero. | |
525 | */ | |
526 | if (imap.br_startblock == HOLESTARTBLOCK) { | |
527 | return 0; | |
528 | } | |
529 | /* | |
530 | * Zero the part of the last block beyond the EOF, and write it | |
531 | * out sync. We need to drop the ilock while we do this so we | |
532 | * don't deadlock when the buffer cache calls back to us. | |
533 | */ | |
534 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
535 | ||
536 | zero_len = mp->m_sb.sb_blocksize - zero_offset; | |
537 | if (isize + zero_len > offset) | |
538 | zero_len = offset - isize; | |
539 | error = xfs_iozero(ip, isize, zero_len); | |
540 | ||
541 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
542 | ASSERT(error >= 0); | |
543 | return error; | |
544 | } | |
545 | ||
546 | /* | |
547 | * Zero any on disk space between the current EOF and the new, | |
548 | * larger EOF. This handles the normal case of zeroing the remainder | |
549 | * of the last block in the file and the unusual case of zeroing blocks | |
550 | * out beyond the size of the file. This second case only happens | |
551 | * with fixed size extents and when the system crashes before the inode | |
552 | * size was updated but after blocks were allocated. If fill is set, | |
553 | * then any holes in the range are filled and zeroed. If not, the holes | |
554 | * are left alone as holes. | |
555 | */ | |
556 | ||
557 | int /* error (positive) */ | |
558 | xfs_zero_eof( | |
559 | xfs_inode_t *ip, | |
560 | xfs_off_t offset, /* starting I/O offset */ | |
561 | xfs_fsize_t isize) /* current inode size */ | |
562 | { | |
563 | xfs_mount_t *mp = ip->i_mount; | |
564 | xfs_fileoff_t start_zero_fsb; | |
565 | xfs_fileoff_t end_zero_fsb; | |
566 | xfs_fileoff_t zero_count_fsb; | |
567 | xfs_fileoff_t last_fsb; | |
568 | xfs_fileoff_t zero_off; | |
569 | xfs_fsize_t zero_len; | |
570 | int nimaps; | |
571 | int error = 0; | |
572 | xfs_bmbt_irec_t imap; | |
573 | ||
574 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
575 | ASSERT(offset > isize); | |
576 | ||
577 | /* | |
578 | * First handle zeroing the block on which isize resides. | |
579 | * We only zero a part of that block so it is handled specially. | |
580 | */ | |
581 | error = xfs_zero_last_block(ip, offset, isize); | |
582 | if (error) { | |
583 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
584 | return error; | |
585 | } | |
586 | ||
587 | /* | |
588 | * Calculate the range between the new size and the old | |
589 | * where blocks needing to be zeroed may exist. To get the | |
590 | * block where the last byte in the file currently resides, | |
591 | * we need to subtract one from the size and truncate back | |
592 | * to a block boundary. We subtract 1 in case the size is | |
593 | * exactly on a block boundary. | |
594 | */ | |
595 | last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; | |
596 | start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); | |
597 | end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); | |
598 | ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); | |
599 | if (last_fsb == end_zero_fsb) { | |
600 | /* | |
601 | * The size was only incremented on its last block. | |
602 | * We took care of that above, so just return. | |
603 | */ | |
604 | return 0; | |
605 | } | |
606 | ||
607 | ASSERT(start_zero_fsb <= end_zero_fsb); | |
608 | while (start_zero_fsb <= end_zero_fsb) { | |
609 | nimaps = 1; | |
610 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; | |
611 | error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, | |
b4e9181e | 612 | 0, NULL, 0, &imap, &nimaps, NULL); |
dda35b8f CH |
613 | if (error) { |
614 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); | |
615 | return error; | |
616 | } | |
617 | ASSERT(nimaps > 0); | |
618 | ||
619 | if (imap.br_state == XFS_EXT_UNWRITTEN || | |
620 | imap.br_startblock == HOLESTARTBLOCK) { | |
621 | /* | |
622 | * This loop handles initializing pages that were | |
623 | * partially initialized by the code below this | |
624 | * loop. It basically zeroes the part of the page | |
625 | * that sits on a hole and sets the page as P_HOLE | |
626 | * and calls remapf if it is a mapped file. | |
627 | */ | |
628 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | |
629 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
630 | continue; | |
631 | } | |
632 | ||
633 | /* | |
634 | * There are blocks we need to zero. | |
635 | * Drop the inode lock while we're doing the I/O. | |
636 | * We'll still have the iolock to protect us. | |
637 | */ | |
638 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
639 | ||
640 | zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); | |
641 | zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); | |
642 | ||
643 | if ((zero_off + zero_len) > offset) | |
644 | zero_len = offset - zero_off; | |
645 | ||
646 | error = xfs_iozero(ip, zero_off, zero_len); | |
647 | if (error) { | |
648 | goto out_lock; | |
649 | } | |
650 | ||
651 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | |
652 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
653 | ||
654 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
655 | } | |
656 | ||
657 | return 0; | |
658 | ||
659 | out_lock: | |
660 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
661 | ASSERT(error >= 0); | |
662 | return error; | |
663 | } | |
664 | ||
4d8d1581 DC |
665 | /* |
666 | * Common pre-write limit and setup checks. | |
667 | * | |
668 | * Returns with iolock held according to @iolock. | |
669 | */ | |
670 | STATIC ssize_t | |
671 | xfs_file_aio_write_checks( | |
672 | struct file *file, | |
673 | loff_t *pos, | |
674 | size_t *count, | |
7271d243 | 675 | xfs_fsize_t *new_sizep, |
4d8d1581 DC |
676 | int *iolock) |
677 | { | |
678 | struct inode *inode = file->f_mapping->host; | |
679 | struct xfs_inode *ip = XFS_I(inode); | |
680 | xfs_fsize_t new_size; | |
681 | int error = 0; | |
682 | ||
7271d243 DC |
683 | *new_sizep = 0; |
684 | restart: | |
4d8d1581 DC |
685 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); |
686 | if (error) { | |
687 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); | |
688 | *iolock = 0; | |
689 | return error; | |
690 | } | |
691 | ||
4d8d1581 DC |
692 | if (likely(!(file->f_mode & FMODE_NOCMTIME))) |
693 | file_update_time(file); | |
694 | ||
695 | /* | |
696 | * If the offset is beyond the size of the file, we need to zero any | |
697 | * blocks that fall between the existing EOF and the start of this | |
7271d243 DC |
698 | * write. There is no need to issue zeroing if another in-flight IO ends
699 | * at or before this one. If zeroing is needed and we are currently
700 | * holding the iolock shared, we need to update it to exclusive which | |
701 | * involves dropping all locks and relocking to maintain correct locking | |
702 | * order. If we do this, restart the function to ensure all checks and | |
703 | * values are still valid. | |
4d8d1581 | 704 | */ |
7271d243 DC |
705 | if ((ip->i_new_size && *pos > ip->i_new_size) || |
706 | (!ip->i_new_size && *pos > ip->i_size)) { | |
707 | if (*iolock == XFS_IOLOCK_SHARED) { | |
708 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock); | |
709 | *iolock = XFS_IOLOCK_EXCL; | |
710 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | |
711 | goto restart; | |
712 | } | |
4d8d1581 | 713 | error = -xfs_zero_eof(ip, *pos, ip->i_size); |
7271d243 DC |
714 | } |
715 | ||
716 | /* | |
717 | * If this IO extends beyond EOF, we may need to update ip->i_new_size. | |
718 | * We have already zeroed space beyond EOF (if necessary). Only update | |
719 | * ip->i_new_size if this IO ends beyond any other in-flight writes. | |
720 | */ | |
721 | new_size = *pos + *count; | |
722 | if (new_size > ip->i_size) { | |
723 | if (new_size > ip->i_new_size) | |
724 | ip->i_new_size = new_size; | |
725 | *new_sizep = new_size; | |
726 | } | |
4d8d1581 DC |
727 | |
728 | xfs_rw_iunlock(ip, XFS_ILOCK_EXCL); | |
729 | if (error) | |
730 | return error; | |
731 | ||
732 | /* | |
733 | * If we're writing the file then make sure to clear the setuid and | |
734 | * setgid bits if the process is not being run by root. This keeps | |
735 | * people from modifying setuid and setgid binaries. | |
736 | */ | |
737 | return file_remove_suid(file); | |
738 | ||
739 | } | |
740 | ||
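The shared-to-exclusive relock with `goto restart` above is the standard answer to locks that cannot be upgraded in place: drop everything, retake exclusive, and redo every check because the state may have moved while unlocked. A generic, hedged userspace illustration of the same pattern (pthreads; every name here is invented for the example):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static long file_size;

/* Extend file_size to at least new_size, taking the write lock only when an
 * update is actually required. Mirrors the drop-relock-restart logic above. */
static void extend_to(long new_size)
{
	int excl = 0;

	pthread_rwlock_rdlock(&lock);
restart:
	if (file_size < new_size) {
		if (!excl) {
			/* rwlocks cannot be upgraded: drop, retake exclusive,
			 * and rerun the check from the top. */
			pthread_rwlock_unlock(&lock);
			pthread_rwlock_wrlock(&lock);
			excl = 1;
			goto restart;
		}
		file_size = new_size;
	}
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	extend_to(4096);
	printf("file_size is now %ld\n", file_size);
	return 0;
}
```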
f0d26e86 DC |
741 | /* |
742 | * xfs_file_dio_aio_write - handle direct IO writes | |
743 | * | |
744 | * Lock the inode appropriately to prepare for and issue a direct IO write. | |
eda77982 | 745 | * By separating it from the buffered write path we remove all the tricky to |
f0d26e86 DC |
746 | * follow locking changes and looping. |
747 | * | |
eda77982 DC |
748 | * If there are cached pages or we're extending the file, we need IOLOCK_EXCL |
749 | * until we're sure the bytes at the new EOF have been zeroed and/or the cached | |
750 | * pages are flushed out. | |
751 | * | |
752 | * In most cases the direct IO writes will be done holding IOLOCK_SHARED | |
753 | * allowing them to be done in parallel with reads and other direct IO writes. | |
754 | * However, if the IO is not aligned to filesystem blocks, the direct IO layer | |
755 | * needs to do sub-block zeroing and that requires serialisation against other | |
756 | * direct IOs to the same block. In this case we need to serialise the | |
757 | * submission of the unaligned IOs so that we don't get racing block zeroing in | |
758 | * the dio layer. To avoid the problem with aio, we also need to wait for | |
759 | * outstanding IOs to complete so that unwritten extent conversion is completed | |
760 | * before we try to map the overlapping block. This is currently implemented by | |
761 | * hitting it with a big hammer (i.e. xfs_ioend_wait()). | |
762 | * | |
f0d26e86 DC |
763 | * Returns with locks held indicated by @iolock and errors indicated by |
764 | * negative return values. | |
765 | */ | |
766 | STATIC ssize_t | |
767 | xfs_file_dio_aio_write( | |
768 | struct kiocb *iocb, | |
769 | const struct iovec *iovp, | |
770 | unsigned long nr_segs, | |
771 | loff_t pos, | |
772 | size_t ocount, | |
7271d243 | 773 | xfs_fsize_t *new_size, |
f0d26e86 DC |
774 | int *iolock) |
775 | { | |
776 | struct file *file = iocb->ki_filp; | |
777 | struct address_space *mapping = file->f_mapping; | |
778 | struct inode *inode = mapping->host; | |
779 | struct xfs_inode *ip = XFS_I(inode); | |
780 | struct xfs_mount *mp = ip->i_mount; | |
781 | ssize_t ret = 0; | |
f0d26e86 | 782 | size_t count = ocount; |
eda77982 | 783 | int unaligned_io = 0; |
f0d26e86 DC |
784 | struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? |
785 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
786 | ||
787 | *iolock = 0; | |
788 | if ((pos & target->bt_smask) || (count & target->bt_smask)) | |
789 | return -XFS_ERROR(EINVAL); | |
790 | ||
eda77982 DC |
791 | if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) |
792 | unaligned_io = 1; | |
793 | ||
7271d243 DC |
794 | /* |
795 | * We don't need to take an exclusive lock unless the page cache needs
796 | * to be invalidated or unaligned IO is being executed. We don't need to | |
797 | * consider the EOF extension case here because | |
798 | * xfs_file_aio_write_checks() will relock the inode as necessary for | |
799 | * EOF zeroing cases and fill out the new inode size as appropriate. | |
800 | */ | |
801 | if (unaligned_io || mapping->nrpages) | |
f0d26e86 DC |
802 | *iolock = XFS_IOLOCK_EXCL; |
803 | else | |
804 | *iolock = XFS_IOLOCK_SHARED; | |
805 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | |
806 | ||
7271d243 | 807 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); |
4d8d1581 | 808 | if (ret) |
f0d26e86 DC |
809 | return ret; |
810 | ||
811 | if (mapping->nrpages) { | |
812 | WARN_ON(*iolock != XFS_IOLOCK_EXCL); | |
813 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, | |
814 | FI_REMAPF_LOCKED); | |
815 | if (ret) | |
816 | return ret; | |
817 | } | |
818 | ||
eda77982 DC |
819 | /* |
820 | * If we are doing unaligned IO, wait for all other IO to drain, | |
821 | * otherwise demote the lock if we had to flush cached pages | |
822 | */ | |
823 | if (unaligned_io) | |
824 | xfs_ioend_wait(ip); | |
825 | else if (*iolock == XFS_IOLOCK_EXCL) { | |
f0d26e86 DC |
826 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
827 | *iolock = XFS_IOLOCK_SHARED; | |
828 | } | |
829 | ||
830 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); | |
831 | ret = generic_file_direct_write(iocb, iovp, | |
832 | &nr_segs, pos, &iocb->ki_pos, count, ocount); | |
833 | ||
834 | /* No fallback to buffered IO on errors for XFS. */ | |
835 | ASSERT(ret < 0 || ret == count); | |
836 | return ret; | |
837 | } | |
838 | ||
00258e36 | 839 | STATIC ssize_t |
637bbc75 | 840 | xfs_file_buffered_aio_write( |
dda35b8f CH |
841 | struct kiocb *iocb, |
842 | const struct iovec *iovp, | |
00258e36 | 843 | unsigned long nr_segs, |
637bbc75 DC |
844 | loff_t pos, |
845 | size_t ocount, | |
7271d243 | 846 | xfs_fsize_t *new_size, |
637bbc75 | 847 | int *iolock) |
dda35b8f CH |
848 | { |
849 | struct file *file = iocb->ki_filp; | |
850 | struct address_space *mapping = file->f_mapping; | |
851 | struct inode *inode = mapping->host; | |
00258e36 | 852 | struct xfs_inode *ip = XFS_I(inode); |
637bbc75 DC |
853 | ssize_t ret; |
854 | int enospc = 0; | |
637bbc75 | 855 | size_t count = ocount; |
dda35b8f | 856 | |
637bbc75 DC |
857 | *iolock = XFS_IOLOCK_EXCL; |
858 | xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock); | |
dda35b8f | 859 | |
7271d243 | 860 | ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock); |
4d8d1581 | 861 | if (ret) |
637bbc75 | 862 | return ret; |
dda35b8f CH |
863 | |
864 | /* We can write back this queue in page reclaim */ | |
865 | current->backing_dev_info = mapping->backing_dev_info; | |
866 | ||
dda35b8f | 867 | write_retry: |
637bbc75 DC |
868 | trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); |
869 | ret = generic_file_buffered_write(iocb, iovp, nr_segs, | |
870 | pos, &iocb->ki_pos, count, ret); | |
871 | /* | |
872 | * if we just got an ENOSPC, flush the inode now we aren't holding any | |
873 | * page locks and retry *once* | |
874 | */ | |
875 | if (ret == -ENOSPC && !enospc) { | |
876 | ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); | |
877 | if (ret) | |
878 | return ret; | |
879 | enospc = 1; | |
880 | goto write_retry; | |
dda35b8f | 881 | } |
dda35b8f | 882 | current->backing_dev_info = NULL; |
637bbc75 DC |
883 | return ret; |
884 | } | |
885 | ||
886 | STATIC ssize_t | |
887 | xfs_file_aio_write( | |
888 | struct kiocb *iocb, | |
889 | const struct iovec *iovp, | |
890 | unsigned long nr_segs, | |
891 | loff_t pos) | |
892 | { | |
893 | struct file *file = iocb->ki_filp; | |
894 | struct address_space *mapping = file->f_mapping; | |
895 | struct inode *inode = mapping->host; | |
896 | struct xfs_inode *ip = XFS_I(inode); | |
897 | ssize_t ret; | |
898 | int iolock; | |
899 | size_t ocount = 0; | |
7271d243 | 900 | xfs_fsize_t new_size = 0; |
637bbc75 DC |
901 | |
902 | XFS_STATS_INC(xs_write_calls); | |
903 | ||
904 | BUG_ON(iocb->ki_pos != pos); | |
905 | ||
906 | ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); | |
907 | if (ret) | |
908 | return ret; | |
909 | ||
910 | if (ocount == 0) | |
911 | return 0; | |
912 | ||
913 | xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE); | |
914 | ||
915 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
916 | return -EIO; | |
917 | ||
918 | if (unlikely(file->f_flags & O_DIRECT)) | |
919 | ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, | |
7271d243 | 920 | ocount, &new_size, &iolock); |
637bbc75 DC |
921 | else |
922 | ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, | |
7271d243 | 923 | ocount, &new_size, &iolock); |
dda35b8f | 924 | |
edafb6da | 925 | xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret); |
dda35b8f | 926 | |
dda35b8f | 927 | if (ret <= 0) |
637bbc75 | 928 | goto out_unlock; |
dda35b8f | 929 | |
dda35b8f CH |
930 | /* Handle various SYNC-type writes */ |
931 | if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { | |
932 | loff_t end = pos + ret - 1; | |
340a0a01 | 933 | int error; |
dda35b8f | 934 | |
487f84f3 | 935 | xfs_rw_iunlock(ip, iolock); |
340a0a01 | 936 | error = xfs_file_fsync(file, pos, end, |
02c24a82 | 937 | (file->f_flags & __O_SYNC) ? 0 : 1); |
487f84f3 | 938 | xfs_rw_ilock(ip, iolock); |
340a0a01 MT |
939 | if (error) |
940 | ret = error; | |
dda35b8f CH |
941 | } |
942 | ||
637bbc75 | 943 | out_unlock: |
7271d243 | 944 | xfs_aio_write_newsize_update(ip, new_size); |
487f84f3 | 945 | xfs_rw_iunlock(ip, iolock); |
a363f0c2 | 946 | return ret; |
dda35b8f CH |
947 | } |
948 | ||
2fe17c10 CH |
949 | STATIC long |
950 | xfs_file_fallocate( | |
951 | struct file *file, | |
952 | int mode, | |
953 | loff_t offset, | |
954 | loff_t len) | |
955 | { | |
956 | struct inode *inode = file->f_path.dentry->d_inode; | |
957 | long error; | |
958 | loff_t new_size = 0; | |
959 | xfs_flock64_t bf; | |
960 | xfs_inode_t *ip = XFS_I(inode); | |
961 | int cmd = XFS_IOC_RESVSP; | |
82878897 | 962 | int attr_flags = XFS_ATTR_NOLOCK; |
2fe17c10 CH |
963 | |
964 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | |
965 | return -EOPNOTSUPP; | |
966 | ||
967 | bf.l_whence = 0; | |
968 | bf.l_start = offset; | |
969 | bf.l_len = len; | |
970 | ||
971 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | |
972 | ||
973 | if (mode & FALLOC_FL_PUNCH_HOLE) | |
974 | cmd = XFS_IOC_UNRESVSP; | |
975 | ||
976 | /* check the new inode size is valid before allocating */ | |
977 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
978 | offset + len > i_size_read(inode)) { | |
979 | new_size = offset + len; | |
980 | error = inode_newsize_ok(inode, new_size); | |
981 | if (error) | |
982 | goto out_unlock; | |
983 | } | |
984 | ||
82878897 DC |
985 | if (file->f_flags & O_DSYNC) |
986 | attr_flags |= XFS_ATTR_SYNC; | |
987 | ||
988 | error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags); | |
2fe17c10 CH |
989 | if (error) |
990 | goto out_unlock; | |
991 | ||
992 | /* Change file size if needed */ | |
993 | if (new_size) { | |
994 | struct iattr iattr; | |
995 | ||
996 | iattr.ia_valid = ATTR_SIZE; | |
997 | iattr.ia_size = new_size; | |
c4ed4243 | 998 | error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK); |
2fe17c10 CH |
999 | } |
1000 | ||
1001 | out_unlock: | |
1002 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
1003 | return error; | |
1004 | } | |
1005 | ||
1006 | ||
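For reference (not part of the file): the handler above is what services fallocate(2) on XFS. A hedged userspace sketch with a made-up path and sizes; the hole punch is paired with FALLOC_FL_KEEP_SIZE, which the VFS requires for that mode:

```c
#define _GNU_SOURCE		/* for fallocate() */
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* Preallocate 1 MiB without changing the visible file size
	 * (the XFS_IOC_RESVSP path above). */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate(KEEP_SIZE)");
	/* Punch a 64 KiB hole at offset 128 KiB (the XFS_IOC_UNRESVSP path). */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      128 << 10, 64 << 10) < 0)
		perror("fallocate(PUNCH_HOLE)");
	close(fd);
	return EXIT_SUCCESS;
}
```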
1da177e4 | 1007 | STATIC int |
3562fd45 | 1008 | xfs_file_open( |
1da177e4 | 1009 | struct inode *inode, |
f999a5bf | 1010 | struct file *file) |
1da177e4 | 1011 | { |
f999a5bf | 1012 | if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) |
1da177e4 | 1013 | return -EFBIG; |
f999a5bf CH |
1014 | if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) |
1015 | return -EIO; | |
1016 | return 0; | |
1017 | } | |
1018 | ||
1019 | STATIC int | |
1020 | xfs_dir_open( | |
1021 | struct inode *inode, | |
1022 | struct file *file) | |
1023 | { | |
1024 | struct xfs_inode *ip = XFS_I(inode); | |
1025 | int mode; | |
1026 | int error; | |
1027 | ||
1028 | error = xfs_file_open(inode, file); | |
1029 | if (error) | |
1030 | return error; | |
1031 | ||
1032 | /* | |
1033 | * If there are any blocks, read-ahead block 0 as we're almost | |
1034 | * certain to have the next operation be a read there. | |
1035 | */ | |
1036 | mode = xfs_ilock_map_shared(ip); | |
1037 | if (ip->i_d.di_nextents > 0) | |
1038 | xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK); | |
1039 | xfs_iunlock(ip, mode); | |
1040 | return 0; | |
1da177e4 LT |
1041 | } |
1042 | ||
1da177e4 | 1043 | STATIC int |
3562fd45 | 1044 | xfs_file_release( |
1da177e4 LT |
1045 | struct inode *inode, |
1046 | struct file *filp) | |
1047 | { | |
739bfb2a | 1048 | return -xfs_release(XFS_I(inode)); |
1da177e4 LT |
1049 | } |
1050 | ||
1da177e4 | 1051 | STATIC int |
3562fd45 | 1052 | xfs_file_readdir( |
1da177e4 LT |
1053 | struct file *filp, |
1054 | void *dirent, | |
1055 | filldir_t filldir) | |
1056 | { | |
051e7cd4 | 1057 | struct inode *inode = filp->f_path.dentry->d_inode; |
739bfb2a | 1058 | xfs_inode_t *ip = XFS_I(inode); |
051e7cd4 CH |
1059 | int error; |
1060 | size_t bufsize; | |
1061 | ||
1062 | /* | |
1063 | * The Linux API doesn't pass the total size of the buffer
1064 | * we read into down to the filesystem. With the filldir concept
1065 | * it's not needed for correct information, but the XFS dir2 leaf
1066 | * code wants an estimate of the buffer size to calculate its
1067 | * readahead window and size the buffers used for mapping to | |
1068 | * physical blocks. | |
1069 | * | |
1070 | * Try to give it an estimate that's good enough, maybe at some | |
1071 | * point we can change the ->readdir prototype to include the | |
a9cc799e | 1072 | * buffer size. For now we use the current glibc buffer size. |
051e7cd4 | 1073 | */ |
a9cc799e | 1074 | bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); |
051e7cd4 | 1075 | |
739bfb2a | 1076 | error = xfs_readdir(ip, dirent, bufsize, |
051e7cd4 CH |
1077 | (xfs_off_t *)&filp->f_pos, filldir); |
1078 | if (error) | |
1079 | return -error; | |
1080 | return 0; | |
1da177e4 LT |
1081 | } |
1082 | ||
1da177e4 | 1083 | STATIC int |
3562fd45 | 1084 | xfs_file_mmap( |
1da177e4 LT |
1085 | struct file *filp, |
1086 | struct vm_area_struct *vma) | |
1087 | { | |
3562fd45 | 1088 | vma->vm_ops = &xfs_file_vm_ops; |
d0217ac0 | 1089 | vma->vm_flags |= VM_CAN_NONLINEAR; |
6fac0cb4 | 1090 | |
fbc1462b | 1091 | file_accessed(filp); |
1da177e4 LT |
1092 | return 0; |
1093 | } | |
1094 | ||
4f57dbc6 DC |
1095 | /* |
1096 | * mmap()d file has taken write protection fault and is being made | |
1097 | * writable. We can set the page state up correctly for a writable | |
1098 | * page, which means we can do correct delalloc accounting (ENOSPC | |
1099 | * checking!) and unwritten extent mapping. | |
1100 | */ | |
1101 | STATIC int | |
1102 | xfs_vm_page_mkwrite( | |
1103 | struct vm_area_struct *vma, | |
c2ec175c | 1104 | struct vm_fault *vmf) |
4f57dbc6 | 1105 | { |
c2ec175c | 1106 | return block_page_mkwrite(vma, vmf, xfs_get_blocks); |
4f57dbc6 DC |
1107 | } |
1108 | ||
4b6f5d20 | 1109 | const struct file_operations xfs_file_operations = { |
1da177e4 LT |
1110 | .llseek = generic_file_llseek, |
1111 | .read = do_sync_read, | |
bb3f724e | 1112 | .write = do_sync_write, |
3562fd45 NS |
1113 | .aio_read = xfs_file_aio_read, |
1114 | .aio_write = xfs_file_aio_write, | |
1b895840 NS |
1115 | .splice_read = xfs_file_splice_read, |
1116 | .splice_write = xfs_file_splice_write, | |
3562fd45 | 1117 | .unlocked_ioctl = xfs_file_ioctl, |
1da177e4 | 1118 | #ifdef CONFIG_COMPAT |
3562fd45 | 1119 | .compat_ioctl = xfs_file_compat_ioctl, |
1da177e4 | 1120 | #endif |
3562fd45 NS |
1121 | .mmap = xfs_file_mmap, |
1122 | .open = xfs_file_open, | |
1123 | .release = xfs_file_release, | |
1124 | .fsync = xfs_file_fsync, | |
2fe17c10 | 1125 | .fallocate = xfs_file_fallocate, |
1da177e4 LT |
1126 | }; |
1127 | ||
4b6f5d20 | 1128 | const struct file_operations xfs_dir_file_operations = { |
f999a5bf | 1129 | .open = xfs_dir_open, |
1da177e4 | 1130 | .read = generic_read_dir, |
3562fd45 | 1131 | .readdir = xfs_file_readdir, |
59af1584 | 1132 | .llseek = generic_file_llseek, |
3562fd45 | 1133 | .unlocked_ioctl = xfs_file_ioctl, |
d3870398 | 1134 | #ifdef CONFIG_COMPAT |
3562fd45 | 1135 | .compat_ioctl = xfs_file_compat_ioctl, |
d3870398 | 1136 | #endif |
3562fd45 | 1137 | .fsync = xfs_file_fsync, |
1da177e4 LT |
1138 | }; |
1139 | ||
f0f37e2f | 1140 | static const struct vm_operations_struct xfs_file_vm_ops = { |
54cb8821 | 1141 | .fault = filemap_fault, |
4f57dbc6 | 1142 | .page_mkwrite = xfs_vm_page_mkwrite, |
6fac0cb4 | 1143 | }; |