Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
7b718769 NS |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
3 | * All Rights Reserved. | |
1da177e4 | 4 | * |
7b718769 NS |
5 | * This program is free software; you can redistribute it and/or |
6 | * modify it under the terms of the GNU General Public License as | |
1da177e4 LT |
7 | * published by the Free Software Foundation. |
8 | * | |
7b718769 NS |
9 | * This program is distributed in the hope that it would be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
1da177e4 | 13 | * |
7b718769 NS |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
1da177e4 | 17 | */ |
1da177e4 | 18 | #include "xfs.h" |
dda35b8f | 19 | #include "xfs_fs.h" |
70a9883c | 20 | #include "xfs_shared.h" |
a4fbe6ab | 21 | #include "xfs_format.h" |
239880ef DC |
22 | #include "xfs_log_format.h" |
23 | #include "xfs_trans_resv.h" | |
1da177e4 | 24 | #include "xfs_sb.h" |
a844f451 | 25 | #include "xfs_ag.h" |
1da177e4 | 26 | #include "xfs_mount.h" |
57062787 DC |
27 | #include "xfs_da_format.h" |
28 | #include "xfs_da_btree.h" | |
1da177e4 | 29 | #include "xfs_inode.h" |
239880ef | 30 | #include "xfs_trans.h" |
fd3200be | 31 | #include "xfs_inode_item.h" |
dda35b8f | 32 | #include "xfs_bmap.h" |
c24b5dfa | 33 | #include "xfs_bmap_util.h" |
1da177e4 | 34 | #include "xfs_error.h" |
2b9ab5ab | 35 | #include "xfs_dir2.h" |
c24b5dfa | 36 | #include "xfs_dir2_priv.h" |
ddcd856d | 37 | #include "xfs_ioctl.h" |
dda35b8f | 38 | #include "xfs_trace.h" |
239880ef | 39 | #include "xfs_log.h" |
a4fbe6ab | 40 | #include "xfs_dinode.h" |
1da177e4 | 41 | |
a27bb332 | 42 | #include <linux/aio.h> |
1da177e4 | 43 | #include <linux/dcache.h> |
2fe17c10 | 44 | #include <linux/falloc.h> |
d126d43f | 45 | #include <linux/pagevec.h> |
1da177e4 | 46 | |
f0f37e2f | 47 | static const struct vm_operations_struct xfs_file_vm_ops; |
1da177e4 | 48 | |
487f84f3 DC |
49 | /* |
50 | * Locking primitives for read and write IO paths to ensure we consistently use | |
51 | * and order the inode->i_mutex, ip->i_lock and ip->i_iolock. | |
52 | */ | |
53 | static inline void | |
54 | xfs_rw_ilock( | |
55 | struct xfs_inode *ip, | |
56 | int type) | |
57 | { | |
58 | if (type & XFS_IOLOCK_EXCL) | |
59 | mutex_lock(&VFS_I(ip)->i_mutex); | |
60 | xfs_ilock(ip, type); | |
61 | } | |
62 | ||
63 | static inline void | |
64 | xfs_rw_iunlock( | |
65 | struct xfs_inode *ip, | |
66 | int type) | |
67 | { | |
68 | xfs_iunlock(ip, type); | |
69 | if (type & XFS_IOLOCK_EXCL) | |
70 | mutex_unlock(&VFS_I(ip)->i_mutex); | |
71 | } | |
72 | ||
73 | static inline void | |
74 | xfs_rw_ilock_demote( | |
75 | struct xfs_inode *ip, | |
76 | int type) | |
77 | { | |
78 | xfs_ilock_demote(ip, type); | |
79 | if (type & XFS_IOLOCK_EXCL) | |
80 | mutex_unlock(&VFS_I(ip)->i_mutex); | |
81 | } | |
82 | ||
dda35b8f CH |
83 | /* |
84 | * xfs_iozero | |
85 | * | |
86 | * xfs_iozero clears the specified range of the buffer supplied, |
87 | * and marks all the affected blocks as valid and modified. If | |
88 | * an affected block is not allocated, it will be allocated. If | |
89 | * an affected block is not completely overwritten, and is not | |
90 | * valid before the operation, it will be read from disk before | |
91 | * being partially zeroed. | |
92 | */ | |
ef9d8733 | 93 | int |
dda35b8f CH |
94 | xfs_iozero( |
95 | struct xfs_inode *ip, /* inode */ | |
96 | loff_t pos, /* offset in file */ | |
97 | size_t count) /* size of data to zero */ | |
98 | { | |
99 | struct page *page; | |
100 | struct address_space *mapping; | |
101 | int status; | |
102 | ||
103 | mapping = VFS_I(ip)->i_mapping; | |
104 | do { | |
105 | unsigned offset, bytes; | |
106 | void *fsdata; | |
107 | ||
108 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ | |
109 | bytes = PAGE_CACHE_SIZE - offset; | |
110 | if (bytes > count) | |
111 | bytes = count; | |
112 | ||
113 | status = pagecache_write_begin(NULL, mapping, pos, bytes, | |
114 | AOP_FLAG_UNINTERRUPTIBLE, | |
115 | &page, &fsdata); | |
116 | if (status) | |
117 | break; | |
118 | ||
119 | zero_user(page, offset, bytes); | |
120 | ||
121 | status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, | |
122 | page, fsdata); | |
123 | WARN_ON(status <= 0); /* can't return less than zero! */ | |
124 | pos += bytes; | |
125 | count -= bytes; | |
126 | status = 0; | |
127 | } while (count); | |
128 | ||
129 | return (-status); | |
130 | } | |
131 | ||
1da2f2db CH |
132 | /* |
133 | * Fsync operations on directories are much simpler than on regular files, | |
134 | * as there is no file data to flush, and thus also no need for explicit | |
135 | * cache flush operations, and there are no non-transaction metadata updates | |
136 | * on directories either. | |
137 | */ | |
138 | STATIC int | |
139 | xfs_dir_fsync( | |
140 | struct file *file, | |
141 | loff_t start, | |
142 | loff_t end, | |
143 | int datasync) | |
144 | { | |
145 | struct xfs_inode *ip = XFS_I(file->f_mapping->host); | |
146 | struct xfs_mount *mp = ip->i_mount; | |
147 | xfs_lsn_t lsn = 0; | |
148 | ||
149 | trace_xfs_dir_fsync(ip); | |
150 | ||
151 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
152 | if (xfs_ipincount(ip)) | |
153 | lsn = ip->i_itemp->ili_last_lsn; | |
154 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
155 | ||
156 | if (!lsn) | |
157 | return 0; | |
158 | return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); | |
159 | } | |
160 | ||
fd3200be CH |
161 | STATIC int |
162 | xfs_file_fsync( | |
163 | struct file *file, | |
02c24a82 JB |
164 | loff_t start, |
165 | loff_t end, | |
fd3200be CH |
166 | int datasync) |
167 | { | |
7ea80859 CH |
168 | struct inode *inode = file->f_mapping->host; |
169 | struct xfs_inode *ip = XFS_I(inode); | |
a27a263b | 170 | struct xfs_mount *mp = ip->i_mount; |
fd3200be CH |
171 | int error = 0; |
172 | int log_flushed = 0; | |
b1037058 | 173 | xfs_lsn_t lsn = 0; |
fd3200be | 174 | |
cca28fb8 | 175 | trace_xfs_file_fsync(ip); |
fd3200be | 176 | |
02c24a82 JB |
177 | error = filemap_write_and_wait_range(inode->i_mapping, start, end); |
178 | if (error) | |
179 | return error; | |
180 | ||
a27a263b | 181 | if (XFS_FORCED_SHUTDOWN(mp)) |
fd3200be CH |
182 | return -XFS_ERROR(EIO); |
183 | ||
184 | xfs_iflags_clear(ip, XFS_ITRUNCATED); | |
185 | ||
a27a263b CH |
186 | if (mp->m_flags & XFS_MOUNT_BARRIER) { |
187 | /* | |
188 | * If we have an RT and/or log subvolume we need to make sure | |
189 | * to flush the write cache the device used for file data | |
190 | * first. This is to ensure newly written file data make | |
191 | * it to disk before logging the new inode size in case of | |
192 | * an extending write. | |
193 | */ | |
194 | if (XFS_IS_REALTIME_INODE(ip)) | |
195 | xfs_blkdev_issue_flush(mp->m_rtdev_targp); | |
196 | else if (mp->m_logdev_targp != mp->m_ddev_targp) | |
197 | xfs_blkdev_issue_flush(mp->m_ddev_targp); | |
198 | } | |
199 | ||
fd3200be | 200 | /* |
8a9c9980 CH |
201 | * All metadata updates are logged, which means that we just have |
202 | * to flush the log up to the latest LSN that touched the inode. | |
fd3200be CH |
203 | */ |
204 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
8f639dde CH |
205 | if (xfs_ipincount(ip)) { |
206 | if (!datasync || | |
207 | (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP)) | |
208 | lsn = ip->i_itemp->ili_last_lsn; | |
209 | } | |
8a9c9980 | 210 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
fd3200be | 211 | |
8a9c9980 | 212 | if (lsn) |
b1037058 CH |
213 | error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); |
214 | ||
a27a263b CH |
215 | /* |
216 | * If we only have a single device, and the log force above was | |
217 | * a no-op, we might have to flush the data device cache here. | |
218 | * This can only happen for fdatasync/O_DSYNC if we were overwriting | |
219 | * an already allocated file and thus do not have any metadata to | |
220 | * commit. | |
221 | */ | |
222 | if ((mp->m_flags & XFS_MOUNT_BARRIER) && | |
223 | mp->m_logdev_targp == mp->m_ddev_targp && | |
224 | !XFS_IS_REALTIME_INODE(ip) && | |
225 | !log_flushed) | |
226 | xfs_blkdev_issue_flush(mp->m_ddev_targp); | |
fd3200be CH |
227 | |
228 | return -error; | |
229 | } | |
230 | ||
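For context, a minimal userspace sketch (not part of xfs_file.c, path assumed) of the two system calls this handler services: fdatasync() arrives with datasync set, so an inode pinned only by timestamp updates does not force the log, while fsync() always flushes the log up to the inode's last LSN.

```c
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path on an XFS mount. */
	int fd = open("/mnt/xfs/testfile", O_WRONLY | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (write(fd, "data", 4) != 4)
		perror("write");
	if (fdatasync(fd) < 0)		/* datasync == 1 in xfs_file_fsync() */
		perror("fdatasync");
	if (fsync(fd) < 0)		/* datasync == 0: timestamps too */
		perror("fsync");
	close(fd);
	return EXIT_SUCCESS;
}
```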
00258e36 CH |
231 | STATIC ssize_t |
232 | xfs_file_aio_read( | |
dda35b8f CH |
233 | struct kiocb *iocb, |
234 | const struct iovec *iovp, | |
00258e36 CH |
235 | unsigned long nr_segs, |
236 | loff_t pos) | |
dda35b8f CH |
237 | { |
238 | struct file *file = iocb->ki_filp; | |
239 | struct inode *inode = file->f_mapping->host; | |
00258e36 CH |
240 | struct xfs_inode *ip = XFS_I(inode); |
241 | struct xfs_mount *mp = ip->i_mount; | |
dda35b8f CH |
242 | size_t size = 0; |
243 | ssize_t ret = 0; | |
00258e36 | 244 | int ioflags = 0; |
dda35b8f | 245 | xfs_fsize_t n; |
dda35b8f | 246 | |
dda35b8f CH |
247 | XFS_STATS_INC(xs_read_calls); |
248 | ||
00258e36 CH |
249 | BUG_ON(iocb->ki_pos != pos); |
250 | ||
251 | if (unlikely(file->f_flags & O_DIRECT)) | |
252 | ioflags |= IO_ISDIRECT; | |
253 | if (file->f_mode & FMODE_NOCMTIME) | |
254 | ioflags |= IO_INVIS; | |
255 | ||
52764329 DC |
256 | ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE); |
257 | if (ret < 0) | |
258 | return ret; | |
dda35b8f CH |
259 | |
260 | if (unlikely(ioflags & IO_ISDIRECT)) { | |
261 | xfs_buftarg_t *target = | |
262 | XFS_IS_REALTIME_INODE(ip) ? | |
263 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
7c71ee78 ES |
264 | /* DIO must be aligned to device logical sector size */ |
265 | if ((pos | size) & target->bt_logical_sectormask) { | |
fb595814 | 266 | if (pos == i_size_read(inode)) |
00258e36 | 267 | return 0; |
dda35b8f CH |
268 | return -XFS_ERROR(EINVAL); |
269 | } | |
270 | } | |
271 | ||
fb595814 | 272 | n = mp->m_super->s_maxbytes - pos; |
00258e36 | 273 | if (n <= 0 || size == 0) |
dda35b8f CH |
274 | return 0; |
275 | ||
276 | if (n < size) | |
277 | size = n; | |
278 | ||
279 | if (XFS_FORCED_SHUTDOWN(mp)) | |
280 | return -EIO; | |
281 | ||
0c38a251 DC |
282 | /* |
283 | * Locking is a bit tricky here. If we take an exclusive lock | |
284 | * for direct IO, we effectively serialise all new concurrent | |
285 | * read IO to this file and block it behind IO that is currently in | |
286 | * progress because IO in progress holds the IO lock shared. We only | |
287 | * need to hold the lock exclusive to blow away the page cache, so | |
288 | * only take lock exclusively if the page cache needs invalidation. | |
289 | * This allows the normal direct IO case of no page cache pages to | |
290 | * proceed concurrently without serialisation. | |
291 | */ | |
292 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); | |
293 | if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { | |
294 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); | |
487f84f3 DC |
295 | xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); |
296 | ||
00258e36 | 297 | if (inode->i_mapping->nrpages) { |
fb595814 DC |
298 | ret = -filemap_write_and_wait_range( |
299 | VFS_I(ip)->i_mapping, | |
300 | pos, -1); | |
487f84f3 DC |
301 | if (ret) { |
302 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); | |
303 | return ret; | |
304 | } | |
fb595814 | 305 | truncate_pagecache_range(VFS_I(ip), pos, -1); |
00258e36 | 306 | } |
487f84f3 | 307 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
0c38a251 | 308 | } |
dda35b8f | 309 | |
fb595814 | 310 | trace_xfs_file_read(ip, size, pos, ioflags); |
dda35b8f | 311 | |
fb595814 | 312 | ret = generic_file_aio_read(iocb, iovp, nr_segs, pos); |
dda35b8f CH |
313 | if (ret > 0) |
314 | XFS_STATS_ADD(xs_read_bytes, ret); | |
315 | ||
487f84f3 | 316 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
317 | return ret; |
318 | } | |
319 | ||
00258e36 CH |
320 | STATIC ssize_t |
321 | xfs_file_splice_read( | |
dda35b8f CH |
322 | struct file *infilp, |
323 | loff_t *ppos, | |
324 | struct pipe_inode_info *pipe, | |
325 | size_t count, | |
00258e36 | 326 | unsigned int flags) |
dda35b8f | 327 | { |
00258e36 | 328 | struct xfs_inode *ip = XFS_I(infilp->f_mapping->host); |
00258e36 | 329 | int ioflags = 0; |
dda35b8f CH |
330 | ssize_t ret; |
331 | ||
332 | XFS_STATS_INC(xs_read_calls); | |
00258e36 CH |
333 | |
334 | if (infilp->f_mode & FMODE_NOCMTIME) | |
335 | ioflags |= IO_INVIS; | |
336 | ||
dda35b8f CH |
337 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
338 | return -EIO; | |
339 | ||
487f84f3 | 340 | xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); |
dda35b8f | 341 | |
dda35b8f CH |
342 | trace_xfs_file_splice_read(ip, count, *ppos, ioflags); |
343 | ||
344 | ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); | |
345 | if (ret > 0) | |
346 | XFS_STATS_ADD(xs_read_bytes, ret); | |
347 | ||
487f84f3 | 348 | xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); |
dda35b8f CH |
349 | return ret; |
350 | } | |
351 | ||
487f84f3 DC |
352 | /* |
353 | * xfs_file_splice_write() does not use xfs_rw_ilock() because | |
354 | * generic_file_splice_write() takes the i_mutex itself. This, in theory, | |
355 | * could cause lock inversions between the aio_write path and the splice path | |
356 | * if someone is doing concurrent splice(2) based writes and write(2) based | |
357 | * writes to the same inode. The only real way to fix this is to re-implement | |
358 | * the generic code here with correct locking orders. | |
359 | */ | |
00258e36 CH |
360 | STATIC ssize_t |
361 | xfs_file_splice_write( | |
dda35b8f CH |
362 | struct pipe_inode_info *pipe, |
363 | struct file *outfilp, | |
364 | loff_t *ppos, | |
365 | size_t count, | |
00258e36 | 366 | unsigned int flags) |
dda35b8f | 367 | { |
dda35b8f | 368 | struct inode *inode = outfilp->f_mapping->host; |
00258e36 | 369 | struct xfs_inode *ip = XFS_I(inode); |
00258e36 CH |
370 | int ioflags = 0; |
371 | ssize_t ret; | |
dda35b8f CH |
372 | |
373 | XFS_STATS_INC(xs_write_calls); | |
00258e36 CH |
374 | |
375 | if (outfilp->f_mode & FMODE_NOCMTIME) | |
376 | ioflags |= IO_INVIS; | |
377 | ||
dda35b8f CH |
378 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) |
379 | return -EIO; | |
380 | ||
381 | xfs_ilock(ip, XFS_IOLOCK_EXCL); | |
382 | ||
dda35b8f CH |
383 | trace_xfs_file_splice_write(ip, count, *ppos, ioflags); |
384 | ||
385 | ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); | |
ce7ae151 CH |
386 | if (ret > 0) |
387 | XFS_STATS_ADD(xs_write_bytes, ret); | |
dda35b8f | 388 | |
dda35b8f CH |
389 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
390 | return ret; | |
391 | } | |
392 | ||
393 | /* | |
193aec10 CH |
394 | * This routine is called to handle zeroing any space in the last block of the |
395 | * file that is beyond the EOF. We do this since the size is being increased | |
396 | * without writing anything to that block and we don't want to read the | |
397 | * garbage on the disk. | |
dda35b8f CH |
398 | */ |
399 | STATIC int /* error (positive) */ | |
400 | xfs_zero_last_block( | |
193aec10 CH |
401 | struct xfs_inode *ip, |
402 | xfs_fsize_t offset, | |
403 | xfs_fsize_t isize) | |
dda35b8f | 404 | { |
193aec10 CH |
405 | struct xfs_mount *mp = ip->i_mount; |
406 | xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); | |
407 | int zero_offset = XFS_B_FSB_OFFSET(mp, isize); | |
408 | int zero_len; | |
409 | int nimaps = 1; | |
410 | int error = 0; | |
411 | struct xfs_bmbt_irec imap; | |
dda35b8f | 412 | |
193aec10 | 413 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
5c8ed202 | 414 | error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0); |
193aec10 | 415 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
5c8ed202 | 416 | if (error) |
dda35b8f | 417 | return error; |
193aec10 | 418 | |
dda35b8f | 419 | ASSERT(nimaps > 0); |
193aec10 | 420 | |
dda35b8f CH |
421 | /* |
422 | * If the block underlying isize is just a hole, then there | |
423 | * is nothing to zero. | |
424 | */ | |
193aec10 | 425 | if (imap.br_startblock == HOLESTARTBLOCK) |
dda35b8f | 426 | return 0; |
dda35b8f CH |
427 | |
428 | zero_len = mp->m_sb.sb_blocksize - zero_offset; | |
429 | if (isize + zero_len > offset) | |
430 | zero_len = offset - isize; | |
193aec10 | 431 | return xfs_iozero(ip, isize, zero_len); |
dda35b8f CH |
432 | } |
433 | ||
434 | /* | |
193aec10 CH |
435 | * Zero any on disk space between the current EOF and the new, larger EOF. |
436 | * | |
437 | * This handles the normal case of zeroing the remainder of the last block in | |
438 | * the file and the unusual case of zeroing blocks out beyond the size of the | |
439 | * file. This second case only happens with fixed size extents and when the | |
440 | * system crashes before the inode size was updated but after blocks were | |
441 | * allocated. | |
442 | * | |
443 | * Expects the iolock to be held exclusive, and will take the ilock internally. | |
dda35b8f | 444 | */ |
dda35b8f CH |
445 | int /* error (positive) */ |
446 | xfs_zero_eof( | |
193aec10 CH |
447 | struct xfs_inode *ip, |
448 | xfs_off_t offset, /* starting I/O offset */ | |
449 | xfs_fsize_t isize) /* current inode size */ | |
dda35b8f | 450 | { |
193aec10 CH |
451 | struct xfs_mount *mp = ip->i_mount; |
452 | xfs_fileoff_t start_zero_fsb; | |
453 | xfs_fileoff_t end_zero_fsb; | |
454 | xfs_fileoff_t zero_count_fsb; | |
455 | xfs_fileoff_t last_fsb; | |
456 | xfs_fileoff_t zero_off; | |
457 | xfs_fsize_t zero_len; | |
458 | int nimaps; | |
459 | int error = 0; | |
460 | struct xfs_bmbt_irec imap; | |
461 | ||
462 | ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | |
dda35b8f CH |
463 | ASSERT(offset > isize); |
464 | ||
465 | /* | |
466 | * First handle zeroing the block on which isize resides. | |
193aec10 | 467 | * |
dda35b8f CH |
468 | * We only zero a part of that block so it is handled specially. |
469 | */ | |
193aec10 CH |
470 | if (XFS_B_FSB_OFFSET(mp, isize) != 0) { |
471 | error = xfs_zero_last_block(ip, offset, isize); | |
472 | if (error) | |
473 | return error; | |
dda35b8f CH |
474 | } |
475 | ||
476 | /* | |
193aec10 CH |
477 | * Calculate the range between the new size and the old where blocks |
478 | * needing to be zeroed may exist. | |
479 | * | |
480 | * To get the block where the last byte in the file currently resides, | |
481 | * we need to subtract one from the size and truncate back to a block | |
482 | * boundary. We subtract 1 in case the size is exactly on a block | |
483 | * boundary. | |
dda35b8f CH |
484 | */ |
485 | last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1; | |
486 | start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize); | |
487 | end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1); | |
488 | ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb); | |
489 | if (last_fsb == end_zero_fsb) { | |
490 | /* | |
491 | * The size was only incremented on its last block. | |
492 | * We took care of that above, so just return. | |
493 | */ | |
494 | return 0; | |
495 | } | |
496 | ||
497 | ASSERT(start_zero_fsb <= end_zero_fsb); | |
498 | while (start_zero_fsb <= end_zero_fsb) { | |
499 | nimaps = 1; | |
500 | zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; | |
193aec10 CH |
501 | |
502 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
5c8ed202 DC |
503 | error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb, |
504 | &imap, &nimaps, 0); | |
193aec10 CH |
505 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
506 | if (error) | |
dda35b8f | 507 | return error; |
193aec10 | 508 | |
dda35b8f CH |
509 | ASSERT(nimaps > 0); |
510 | ||
511 | if (imap.br_state == XFS_EXT_UNWRITTEN || | |
512 | imap.br_startblock == HOLESTARTBLOCK) { | |
dda35b8f CH |
513 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; |
514 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
515 | continue; | |
516 | } | |
517 | ||
518 | /* | |
519 | * There are blocks we need to zero. | |
dda35b8f | 520 | */ |
dda35b8f CH |
521 | zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); |
522 | zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); | |
523 | ||
524 | if ((zero_off + zero_len) > offset) | |
525 | zero_len = offset - zero_off; | |
526 | ||
527 | error = xfs_iozero(ip, zero_off, zero_len); | |
193aec10 CH |
528 | if (error) |
529 | return error; | |
dda35b8f CH |
530 | |
531 | start_zero_fsb = imap.br_startoff + imap.br_blockcount; | |
532 | ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); | |
dda35b8f CH |
533 | } |
534 | ||
535 | return 0; | |
dda35b8f CH |
536 | } |
537 | ||
4d8d1581 DC |
538 | /* |
539 | * Common pre-write limit and setup checks. | |
540 | * | |
5bf1f262 CH |
541 | * Called with the iolock held either shared or exclusive according to |
542 | * @iolock, and returns with it held. Might upgrade the iolock to exclusive | |
543 | * if called for a direct write beyond i_size. | |
4d8d1581 DC |
544 | */ |
545 | STATIC ssize_t | |
546 | xfs_file_aio_write_checks( | |
547 | struct file *file, | |
548 | loff_t *pos, | |
549 | size_t *count, | |
550 | int *iolock) | |
551 | { | |
552 | struct inode *inode = file->f_mapping->host; | |
553 | struct xfs_inode *ip = XFS_I(inode); | |
4d8d1581 DC |
554 | int error = 0; |
555 | ||
7271d243 | 556 | restart: |
4d8d1581 | 557 | error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode)); |
467f7899 | 558 | if (error) |
4d8d1581 | 559 | return error; |
4d8d1581 | 560 | |
4d8d1581 DC |
561 | /* |
562 | * If the offset is beyond the size of the file, we need to zero any | |
563 | * blocks that fall between the existing EOF and the start of this | |
2813d682 | 564 | * write. If zeroing is needed and we are currently holding the |
467f7899 CH |
565 | * iolock shared, we need to update it to exclusive which implies |
566 | * having to redo all checks before. | |
4d8d1581 | 567 | */ |
2813d682 | 568 | if (*pos > i_size_read(inode)) { |
7271d243 | 569 | if (*iolock == XFS_IOLOCK_SHARED) { |
467f7899 | 570 | xfs_rw_iunlock(ip, *iolock); |
7271d243 | 571 | *iolock = XFS_IOLOCK_EXCL; |
467f7899 | 572 | xfs_rw_ilock(ip, *iolock); |
7271d243 DC |
573 | goto restart; |
574 | } | |
ce7ae151 | 575 | error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); |
467f7899 CH |
576 | if (error) |
577 | return error; | |
7271d243 | 578 | } |
4d8d1581 | 579 | |
8a9c9980 CH |
580 | /* |
581 | * Updating the timestamps will grab the ilock again from | |
582 | * xfs_fs_dirty_inode, so we have to call it after dropping the | |
583 | * lock above. Eventually we should look into a way to avoid | |
584 | * the pointless lock roundtrip. | |
585 | */ | |
c3b2da31 JB |
586 | if (likely(!(file->f_mode & FMODE_NOCMTIME))) { |
587 | error = file_update_time(file); | |
588 | if (error) | |
589 | return error; | |
590 | } | |
8a9c9980 | 591 | |
4d8d1581 DC |
592 | /* |
593 | * If we're writing the file then make sure to clear the setuid and | |
594 | * setgid bits if the process is not being run by root. This keeps | |
595 | * people from modifying setuid and setgid binaries. | |
596 | */ | |
597 | return file_remove_suid(file); | |
4d8d1581 DC |
598 | } |
599 | ||
f0d26e86 DC |
600 | /* |
601 | * xfs_file_dio_aio_write - handle direct IO writes | |
602 | * | |
603 | * Lock the inode appropriately to prepare for and issue a direct IO write. | |
eda77982 | 604 | * By separating it from the buffered write path we remove all the tricky to |
f0d26e86 DC |
605 | * follow locking changes and looping. |
606 | * | |
eda77982 DC |
607 | * If there are cached pages or we're extending the file, we need IOLOCK_EXCL |
608 | * until we're sure the bytes at the new EOF have been zeroed and/or the cached | |
609 | * pages are flushed out. | |
610 | * | |
611 | * In most cases the direct IO writes will be done holding IOLOCK_SHARED | |
612 | * allowing them to be done in parallel with reads and other direct IO writes. | |
613 | * However, if the IO is not aligned to filesystem blocks, the direct IO layer | |
614 | * needs to do sub-block zeroing and that requires serialisation against other | |
615 | * direct IOs to the same block. In this case we need to serialise the | |
616 | * submission of the unaligned IOs so that we don't get racing block zeroing in | |
617 | * the dio layer. To avoid the problem with aio, we also need to wait for | |
618 | * outstanding IOs to complete so that unwritten extent conversion is completed | |
619 | * before we try to map the overlapping block. This is currently implemented by | |
4a06fd26 | 620 | * hitting it with a big hammer (i.e. inode_dio_wait()). |
eda77982 | 621 | * |
f0d26e86 DC |
622 | * Returns with locks held indicated by @iolock and errors indicated by |
623 | * negative return values. | |
624 | */ | |
625 | STATIC ssize_t | |
626 | xfs_file_dio_aio_write( | |
627 | struct kiocb *iocb, | |
628 | const struct iovec *iovp, | |
629 | unsigned long nr_segs, | |
630 | loff_t pos, | |
d0606464 | 631 | size_t ocount) |
f0d26e86 DC |
632 | { |
633 | struct file *file = iocb->ki_filp; | |
634 | struct address_space *mapping = file->f_mapping; | |
635 | struct inode *inode = mapping->host; | |
636 | struct xfs_inode *ip = XFS_I(inode); | |
637 | struct xfs_mount *mp = ip->i_mount; | |
638 | ssize_t ret = 0; | |
f0d26e86 | 639 | size_t count = ocount; |
eda77982 | 640 | int unaligned_io = 0; |
d0606464 | 641 | int iolock; |
f0d26e86 DC |
642 | struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? |
643 | mp->m_rtdev_targp : mp->m_ddev_targp; | |
644 | ||
7c71ee78 ES |
645 | /* DIO must be aligned to device logical sector size */ |
646 | if ((pos | count) & target->bt_logical_sectormask) | |
f0d26e86 DC |
647 | return -XFS_ERROR(EINVAL); |
648 | ||
7c71ee78 | 649 | /* "unaligned" here means not aligned to a filesystem block */ |
eda77982 DC |
650 | if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) |
651 | unaligned_io = 1; | |
652 | ||
7271d243 DC |
653 | /* |
654 | * We don't need to take an exclusive lock unless the page cache needs | |
655 | * to be invalidated or unaligned IO is being executed. We don't need to | |
656 | * consider the EOF extension case here because | |
657 | * xfs_file_aio_write_checks() will relock the inode as necessary for | |
658 | * EOF zeroing cases and fill out the new inode size as appropriate. | |
659 | */ | |
660 | if (unaligned_io || mapping->nrpages) | |
d0606464 | 661 | iolock = XFS_IOLOCK_EXCL; |
f0d26e86 | 662 | else |
d0606464 CH |
663 | iolock = XFS_IOLOCK_SHARED; |
664 | xfs_rw_ilock(ip, iolock); | |
c58cb165 CH |
665 | |
666 | /* | |
667 | * Recheck if there are cached pages that need invalidate after we got | |
668 | * the iolock to protect against other threads adding new pages while | |
669 | * we were waiting for the iolock. | |
670 | */ | |
d0606464 CH |
671 | if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { |
672 | xfs_rw_iunlock(ip, iolock); | |
673 | iolock = XFS_IOLOCK_EXCL; | |
674 | xfs_rw_ilock(ip, iolock); | |
c58cb165 | 675 | } |
f0d26e86 | 676 | |
d0606464 | 677 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); |
4d8d1581 | 678 | if (ret) |
d0606464 | 679 | goto out; |
f0d26e86 DC |
680 | |
681 | if (mapping->nrpages) { | |
fb595814 DC |
682 | ret = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
683 | pos, -1); | |
f0d26e86 | 684 | if (ret) |
d0606464 | 685 | goto out; |
fb595814 | 686 | truncate_pagecache_range(VFS_I(ip), pos, -1); |
f0d26e86 DC |
687 | } |
688 | ||
eda77982 DC |
689 | /* |
690 | * If we are doing unaligned IO, wait for all other IO to drain, | |
691 | * otherwise demote the lock if we had to flush cached pages | |
692 | */ | |
693 | if (unaligned_io) | |
4a06fd26 | 694 | inode_dio_wait(inode); |
d0606464 | 695 | else if (iolock == XFS_IOLOCK_EXCL) { |
f0d26e86 | 696 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
d0606464 | 697 | iolock = XFS_IOLOCK_SHARED; |
f0d26e86 DC |
698 | } |
699 | ||
700 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); | |
701 | ret = generic_file_direct_write(iocb, iovp, | |
702 | &nr_segs, pos, &iocb->ki_pos, count, ocount); | |
703 | ||
d0606464 CH |
704 | out: |
705 | xfs_rw_iunlock(ip, iolock); | |
706 | ||
f0d26e86 DC |
707 | /* No fallback to buffered IO on errors for XFS. */ |
708 | ASSERT(ret < 0 || ret == count); | |
709 | return ret; | |
710 | } | |
711 | ||
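The alignment rules enforced above are easiest to see from userspace. A minimal sketch, assuming a hypothetical mount point and a 4096-byte logical sector/filesystem block size (a misaligned O_DIRECT request would be rejected with EINVAL, and sub-block alignment forces the exclusive iolock):

```c
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const size_t align = 4096;	/* assumed sector/block size */
	void *buf;
	int fd;

	fd = open("/mnt/xfs/dio-test", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (posix_memalign(&buf, align, align) != 0) {
		fprintf(stderr, "posix_memalign failed\n");
		close(fd);
		return EXIT_FAILURE;
	}
	memset(buf, 0xab, align);

	/* Buffer, offset and length are all multiples of the alignment. */
	if (pwrite(fd, buf, align, 0) != (ssize_t)align)
		perror("pwrite");

	free(buf);
	close(fd);
	return EXIT_SUCCESS;
}
```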
00258e36 | 712 | STATIC ssize_t |
637bbc75 | 713 | xfs_file_buffered_aio_write( |
dda35b8f CH |
714 | struct kiocb *iocb, |
715 | const struct iovec *iovp, | |
00258e36 | 716 | unsigned long nr_segs, |
637bbc75 | 717 | loff_t pos, |
d0606464 | 718 | size_t ocount) |
dda35b8f CH |
719 | { |
720 | struct file *file = iocb->ki_filp; | |
721 | struct address_space *mapping = file->f_mapping; | |
722 | struct inode *inode = mapping->host; | |
00258e36 | 723 | struct xfs_inode *ip = XFS_I(inode); |
637bbc75 DC |
724 | ssize_t ret; |
725 | int enospc = 0; | |
d0606464 | 726 | int iolock = XFS_IOLOCK_EXCL; |
637bbc75 | 727 | size_t count = ocount; |
dda35b8f | 728 | |
d0606464 | 729 | xfs_rw_ilock(ip, iolock); |
dda35b8f | 730 | |
d0606464 | 731 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); |
4d8d1581 | 732 | if (ret) |
d0606464 | 733 | goto out; |
dda35b8f CH |
734 | |
735 | /* We can write back this queue in page reclaim */ | |
736 | current->backing_dev_info = mapping->backing_dev_info; | |
737 | ||
dda35b8f | 738 | write_retry: |
637bbc75 DC |
739 | trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0); |
740 | ret = generic_file_buffered_write(iocb, iovp, nr_segs, | |
9aa05000 DC |
741 | pos, &iocb->ki_pos, count, 0); |
742 | ||
637bbc75 | 743 | /* |
9aa05000 DC |
744 | * If we just got an ENOSPC, try to write back all dirty inodes to |
745 | * convert delalloc space to free up some of the excess reserved | |
746 | * metadata space. | |
637bbc75 DC |
747 | */ |
748 | if (ret == -ENOSPC && !enospc) { | |
637bbc75 | 749 | enospc = 1; |
9aa05000 DC |
750 | xfs_flush_inodes(ip->i_mount); |
751 | goto write_retry; | |
dda35b8f | 752 | } |
d0606464 | 753 | |
dda35b8f | 754 | current->backing_dev_info = NULL; |
d0606464 CH |
755 | out: |
756 | xfs_rw_iunlock(ip, iolock); | |
637bbc75 DC |
757 | return ret; |
758 | } | |
759 | ||
760 | STATIC ssize_t | |
761 | xfs_file_aio_write( | |
762 | struct kiocb *iocb, | |
763 | const struct iovec *iovp, | |
764 | unsigned long nr_segs, | |
765 | loff_t pos) | |
766 | { | |
767 | struct file *file = iocb->ki_filp; | |
768 | struct address_space *mapping = file->f_mapping; | |
769 | struct inode *inode = mapping->host; | |
770 | struct xfs_inode *ip = XFS_I(inode); | |
771 | ssize_t ret; | |
637bbc75 DC |
772 | size_t ocount = 0; |
773 | ||
774 | XFS_STATS_INC(xs_write_calls); | |
775 | ||
776 | BUG_ON(iocb->ki_pos != pos); | |
777 | ||
778 | ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ); | |
779 | if (ret) | |
780 | return ret; | |
781 | ||
782 | if (ocount == 0) | |
783 | return 0; | |
784 | ||
d9457dc0 JK |
785 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
786 | ret = -EIO; | |
787 | goto out; | |
788 | } | |
637bbc75 DC |
789 | |
790 | if (unlikely(file->f_flags & O_DIRECT)) | |
d0606464 | 791 | ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount); |
637bbc75 DC |
792 | else |
793 | ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, | |
d0606464 | 794 | ocount); |
dda35b8f | 795 | |
d0606464 CH |
796 | if (ret > 0) { |
797 | ssize_t err; | |
dda35b8f | 798 | |
d0606464 | 799 | XFS_STATS_ADD(xs_write_bytes, ret); |
dda35b8f | 800 | |
d0606464 CH |
801 | /* Handle various SYNC-type writes */ |
802 | err = generic_write_sync(file, pos, ret); | |
803 | if (err < 0) | |
804 | ret = err; | |
dda35b8f CH |
805 | } |
806 | ||
d9457dc0 | 807 | out: |
a363f0c2 | 808 | return ret; |
dda35b8f CH |
809 | } |
810 | ||
2fe17c10 CH |
811 | STATIC long |
812 | xfs_file_fallocate( | |
83aee9e4 CH |
813 | struct file *file, |
814 | int mode, | |
815 | loff_t offset, | |
816 | loff_t len) | |
2fe17c10 | 817 | { |
83aee9e4 CH |
818 | struct inode *inode = file_inode(file); |
819 | struct xfs_inode *ip = XFS_I(inode); | |
820 | struct xfs_trans *tp; | |
821 | long error; | |
822 | loff_t new_size = 0; | |
2fe17c10 | 823 | |
83aee9e4 CH |
824 | if (!S_ISREG(inode->i_mode)) |
825 | return -EINVAL; | |
2fe17c10 CH |
826 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
827 | return -EOPNOTSUPP; | |
828 | ||
2fe17c10 | 829 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
83aee9e4 CH |
830 | if (mode & FALLOC_FL_PUNCH_HOLE) { |
831 | error = xfs_free_file_space(ip, offset, len); | |
832 | if (error) | |
833 | goto out_unlock; | |
834 | } else { | |
835 | if (!(mode & FALLOC_FL_KEEP_SIZE) && | |
836 | offset + len > i_size_read(inode)) { | |
837 | new_size = offset + len; | |
838 | error = -inode_newsize_ok(inode, new_size); | |
839 | if (error) | |
840 | goto out_unlock; | |
841 | } | |
2fe17c10 | 842 | |
83aee9e4 CH |
843 | error = xfs_alloc_file_space(ip, offset, len, |
844 | XFS_BMAPI_PREALLOC); | |
2fe17c10 CH |
845 | if (error) |
846 | goto out_unlock; | |
847 | } | |
848 | ||
83aee9e4 CH |
849 | tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID); |
850 | error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0); | |
851 | if (error) { | |
852 | xfs_trans_cancel(tp, 0); | |
853 | goto out_unlock; | |
854 | } | |
855 | ||
856 | xfs_ilock(ip, XFS_ILOCK_EXCL); | |
857 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); | |
858 | ip->i_d.di_mode &= ~S_ISUID; | |
859 | if (ip->i_d.di_mode & S_IXGRP) | |
860 | ip->i_d.di_mode &= ~S_ISGID; | |
82878897 | 861 | |
83aee9e4 CH |
862 | if (!(mode & FALLOC_FL_PUNCH_HOLE)) |
863 | ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC; | |
864 | ||
865 | xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
866 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | |
867 | ||
868 | if (file->f_flags & O_DSYNC) | |
869 | xfs_trans_set_sync(tp); | |
870 | error = xfs_trans_commit(tp, 0); | |
2fe17c10 CH |
871 | if (error) |
872 | goto out_unlock; | |
873 | ||
874 | /* Change file size if needed */ | |
875 | if (new_size) { | |
876 | struct iattr iattr; | |
877 | ||
878 | iattr.ia_valid = ATTR_SIZE; | |
879 | iattr.ia_size = new_size; | |
83aee9e4 | 880 | error = xfs_setattr_size(ip, &iattr); |
2fe17c10 CH |
881 | } |
882 | ||
883 | out_unlock: | |
884 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
83aee9e4 | 885 | return -error; |
2fe17c10 CH |
886 | } |
887 | ||
888 | ||
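From userspace, the two branches above correspond to the following fallocate() modes; a minimal sketch with an assumed path and sizes:

```c
#define _GNU_SOURCE		/* for fallocate() */
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/prealloc-test", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Preallocate 1 MiB without changing the visible file size. */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate(KEEP_SIZE)");

	/* Punch a 64 KiB hole; the VFS requires KEEP_SIZE with PUNCH_HOLE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 << 10) < 0)
		perror("fallocate(PUNCH_HOLE)");

	close(fd);
	return EXIT_SUCCESS;
}
```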
1da177e4 | 889 | STATIC int |
3562fd45 | 890 | xfs_file_open( |
1da177e4 | 891 | struct inode *inode, |
f999a5bf | 892 | struct file *file) |
1da177e4 | 893 | { |
f999a5bf | 894 | if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) |
1da177e4 | 895 | return -EFBIG; |
f999a5bf CH |
896 | if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) |
897 | return -EIO; | |
898 | return 0; | |
899 | } | |
900 | ||
901 | STATIC int | |
902 | xfs_dir_open( | |
903 | struct inode *inode, | |
904 | struct file *file) | |
905 | { | |
906 | struct xfs_inode *ip = XFS_I(inode); | |
907 | int mode; | |
908 | int error; | |
909 | ||
910 | error = xfs_file_open(inode, file); | |
911 | if (error) | |
912 | return error; | |
913 | ||
914 | /* | |
915 | * If there are any blocks, read-ahead block 0 as we're almost | |
916 | * certain to have the next operation be a read there. | |
917 | */ | |
309ecac8 | 918 | mode = xfs_ilock_data_map_shared(ip); |
f999a5bf | 919 | if (ip->i_d.di_nextents > 0) |
33363fee | 920 | xfs_dir3_data_readahead(NULL, ip, 0, -1); |
f999a5bf CH |
921 | xfs_iunlock(ip, mode); |
922 | return 0; | |
1da177e4 LT |
923 | } |
924 | ||
1da177e4 | 925 | STATIC int |
3562fd45 | 926 | xfs_file_release( |
1da177e4 LT |
927 | struct inode *inode, |
928 | struct file *filp) | |
929 | { | |
739bfb2a | 930 | return -xfs_release(XFS_I(inode)); |
1da177e4 LT |
931 | } |
932 | ||
1da177e4 | 933 | STATIC int |
3562fd45 | 934 | xfs_file_readdir( |
b8227554 AV |
935 | struct file *file, |
936 | struct dir_context *ctx) | |
1da177e4 | 937 | { |
b8227554 | 938 | struct inode *inode = file_inode(file); |
739bfb2a | 939 | xfs_inode_t *ip = XFS_I(inode); |
051e7cd4 CH |
940 | int error; |
941 | size_t bufsize; | |
942 | ||
943 | /* | |
944 | * The Linux API doesn't pass the total size of the buffer | |
945 | * we read into down to the filesystem. With the filldir concept | |
946 | * it's not needed for correct information, but the XFS dir2 leaf | |
947 | * code wants an estimate of the buffer size to calculate its | |
948 | * readahead window and size the buffers used for mapping to | |
949 | * physical blocks. | |
950 | * | |
951 | * Try to give it an estimate that's good enough, maybe at some | |
952 | * point we can change the ->readdir prototype to include the | |
a9cc799e | 953 | * buffer size. For now we use the current glibc buffer size. |
051e7cd4 | 954 | */ |
a9cc799e | 955 | bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); |
051e7cd4 | 956 | |
b8227554 | 957 | error = xfs_readdir(ip, ctx, bufsize); |
051e7cd4 CH |
958 | if (error) |
959 | return -error; | |
960 | return 0; | |
1da177e4 LT |
961 | } |
962 | ||
1da177e4 | 963 | STATIC int |
3562fd45 | 964 | xfs_file_mmap( |
1da177e4 LT |
965 | struct file *filp, |
966 | struct vm_area_struct *vma) | |
967 | { | |
3562fd45 | 968 | vma->vm_ops = &xfs_file_vm_ops; |
6fac0cb4 | 969 | |
fbc1462b | 970 | file_accessed(filp); |
1da177e4 LT |
971 | return 0; |
972 | } | |
973 | ||
4f57dbc6 DC |
974 | /* |
975 | * An mmap()d file has taken a write protection fault and is being made | |
976 | * writable. We can set the page state up correctly for a writable | |
977 | * page, which means we can do correct delalloc accounting (ENOSPC | |
978 | * checking!) and unwritten extent mapping. | |
979 | */ | |
980 | STATIC int | |
981 | xfs_vm_page_mkwrite( | |
982 | struct vm_area_struct *vma, | |
c2ec175c | 983 | struct vm_fault *vmf) |
4f57dbc6 | 984 | { |
c2ec175c | 985 | return block_page_mkwrite(vma, vmf, xfs_get_blocks); |
4f57dbc6 DC |
986 | } |
987 | ||
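A minimal userspace sketch (path and length assumed) of the path that reaches xfs_vm_page_mkwrite(): the first store into a clean page of a shared writable mapping takes a write protection fault, giving the filesystem a chance to set up delalloc accounting before the page is dirtied.

```c
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;		/* assumed page size */
	int fd = open("/mnt/xfs/mmap-test", O_RDWR | O_CREAT, 0644);
	char *p;

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	p[0] = 'x';			/* write fault -> ->page_mkwrite() */
	msync(p, len, MS_SYNC);		/* write the dirtied page back */

	munmap(p, len);
	close(fd);
	return EXIT_SUCCESS;
}
```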
d126d43f JL |
988 | /* |
989 | * This type is designed to indicate the type of offset we would like | |
990 | * to search from page cache for either xfs_seek_data() or xfs_seek_hole(). | |
991 | */ | |
992 | enum { | |
993 | HOLE_OFF = 0, | |
994 | DATA_OFF, | |
995 | }; | |
996 | ||
997 | /* | |
998 | * Lookup the desired type of offset from the given page. | |
999 | * | |
1000 | * On success, return true and the offset argument will point to the | |
1001 | * start of the region that was found. Otherwise this function will | |
1002 | * return false and keep the offset argument unchanged. | |
1003 | */ | |
1004 | STATIC bool | |
1005 | xfs_lookup_buffer_offset( | |
1006 | struct page *page, | |
1007 | loff_t *offset, | |
1008 | unsigned int type) | |
1009 | { | |
1010 | loff_t lastoff = page_offset(page); | |
1011 | bool found = false; | |
1012 | struct buffer_head *bh, *head; | |
1013 | ||
1014 | bh = head = page_buffers(page); | |
1015 | do { | |
1016 | /* | |
1017 | * Unwritten extents that have data in the page | |
1018 | * cache covering them can be identified by the | |
1019 | * BH_Unwritten state flag. Pages with multiple | |
1020 | * buffers might have a mix of holes, data and | |
1021 | * unwritten extents - any buffer with valid | |
1022 | * data in it should have BH_Uptodate flag set | |
1023 | * on it. | |
1024 | */ | |
1025 | if (buffer_unwritten(bh) || | |
1026 | buffer_uptodate(bh)) { | |
1027 | if (type == DATA_OFF) | |
1028 | found = true; | |
1029 | } else { | |
1030 | if (type == HOLE_OFF) | |
1031 | found = true; | |
1032 | } | |
1033 | ||
1034 | if (found) { | |
1035 | *offset = lastoff; | |
1036 | break; | |
1037 | } | |
1038 | lastoff += bh->b_size; | |
1039 | } while ((bh = bh->b_this_page) != head); | |
1040 | ||
1041 | return found; | |
1042 | } | |
1043 | ||
1044 | /* | |
1045 | * This routine is called to find out and return a data or hole offset | |
1046 | * from the page cache for unwritten extents according to the desired | |
1047 | * type for xfs_seek_data() or xfs_seek_hole(). | |
1048 | * | |
1049 | * The argument offset tells us where to start searching in the page | |
1050 | * cache. The map is used to figure out the end points of the range | |
1051 | * in which to look up pages. | |
1052 | * | |
1053 | * Return true if the desired type of offset was found, and the argument | |
1054 | * offset is filled with that address. Otherwise, return false and keep | |
1055 | * offset unchanged. | |
1056 | */ | |
1057 | STATIC bool | |
1058 | xfs_find_get_desired_pgoff( | |
1059 | struct inode *inode, | |
1060 | struct xfs_bmbt_irec *map, | |
1061 | unsigned int type, | |
1062 | loff_t *offset) | |
1063 | { | |
1064 | struct xfs_inode *ip = XFS_I(inode); | |
1065 | struct xfs_mount *mp = ip->i_mount; | |
1066 | struct pagevec pvec; | |
1067 | pgoff_t index; | |
1068 | pgoff_t end; | |
1069 | loff_t endoff; | |
1070 | loff_t startoff = *offset; | |
1071 | loff_t lastoff = startoff; | |
1072 | bool found = false; | |
1073 | ||
1074 | pagevec_init(&pvec, 0); | |
1075 | ||
1076 | index = startoff >> PAGE_CACHE_SHIFT; | |
1077 | endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); | |
1078 | end = endoff >> PAGE_CACHE_SHIFT; | |
1079 | do { | |
1080 | int want; | |
1081 | unsigned nr_pages; | |
1082 | unsigned int i; | |
1083 | ||
1084 | want = min_t(pgoff_t, end - index, PAGEVEC_SIZE); | |
1085 | nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, | |
1086 | want); | |
1087 | /* | |
1088 | * No page was mapped into the given range. If we are searching for | |
1089 | * holes and this is the first pass through the loop, it means the | |
1090 | * given offset landed in a hole, so return it. | |
1091 | * | |
1092 | * If we have already stepped through some block buffers looking for | |
1093 | * holes but they all contained data, the last offset has already | |
1094 | * been updated to point at the end of the last mapped page. If that | |
1095 | * offset has not reached the end of the search range, there should | |
1096 | * be a hole between them. | |
1097 | */ | |
1098 | if (nr_pages == 0) { | |
1099 | /* Data search found nothing */ | |
1100 | if (type == DATA_OFF) | |
1101 | break; | |
1102 | ||
1103 | ASSERT(type == HOLE_OFF); | |
1104 | if (lastoff == startoff || lastoff < endoff) { | |
1105 | found = true; | |
1106 | *offset = lastoff; | |
1107 | } | |
1108 | break; | |
1109 | } | |
1110 | ||
1111 | /* | |
1112 | * At least we found one page. If this is the first time we | |
1113 | * step into the loop, and if the first page index offset is | |
1114 | * greater than the given search offset, a hole was found. | |
1115 | */ | |
1116 | if (type == HOLE_OFF && lastoff == startoff && | |
1117 | lastoff < page_offset(pvec.pages[0])) { | |
1118 | found = true; | |
1119 | break; | |
1120 | } | |
1121 | ||
1122 | for (i = 0; i < nr_pages; i++) { | |
1123 | struct page *page = pvec.pages[i]; | |
1124 | loff_t b_offset; | |
1125 | ||
1126 | /* | |
1127 | * At this point, the page may be truncated or | |
1128 | * invalidated (changing page->mapping to NULL), | |
1129 | * or even swizzled back from swapper_space to tmpfs | |
1130 | * file mapping. However, page->index will not change | |
1131 | * because we have a reference on the page. | |
1132 | * | |
1133 | * Searching is done if the page index is out of range. | |
1134 | * If the current offset has not reached the end of | |
1135 | * the specified search range, there should be a hole | |
1136 | * between them. | |
1137 | */ | |
1138 | if (page->index > end) { | |
1139 | if (type == HOLE_OFF && lastoff < endoff) { | |
1140 | *offset = lastoff; | |
1141 | found = true; | |
1142 | } | |
1143 | goto out; | |
1144 | } | |
1145 | ||
1146 | lock_page(page); | |
1147 | /* | |
1148 | * Page truncated or invalidated(page->mapping == NULL). | |
1149 | * We can freely skip it and proceed to check the next | |
1150 | * page. | |
1151 | */ | |
1152 | if (unlikely(page->mapping != inode->i_mapping)) { | |
1153 | unlock_page(page); | |
1154 | continue; | |
1155 | } | |
1156 | ||
1157 | if (!page_has_buffers(page)) { | |
1158 | unlock_page(page); | |
1159 | continue; | |
1160 | } | |
1161 | ||
1162 | found = xfs_lookup_buffer_offset(page, &b_offset, type); | |
1163 | if (found) { | |
1164 | /* | |
1165 | * The found offset may be less than the start | |
1166 | * point to search if this is the first time to | |
1167 | * come here. | |
1168 | */ | |
1169 | *offset = max_t(loff_t, startoff, b_offset); | |
1170 | unlock_page(page); | |
1171 | goto out; | |
1172 | } | |
1173 | ||
1174 | /* | |
1175 | * We were either searching for data but found nothing, or | |
1176 | * searching for a hole but found a data buffer. In either | |
1177 | * case, the next page probably contains what we are looking | |
1178 | * for, so update the last offset accordingly. | |
1179 | */ | |
1180 | lastoff = page_offset(page) + PAGE_SIZE; | |
1181 | unlock_page(page); | |
1182 | } | |
1183 | ||
1184 | /* | |
1185 | * Fewer pages were returned than we asked for, so the search | |
1186 | * is done. In that case nothing was found when searching for | |
1187 | * data, but we found a hole behind the last offset. | |
1188 | */ | |
1189 | if (nr_pages < want) { | |
1190 | if (type == HOLE_OFF) { | |
1191 | *offset = lastoff; | |
1192 | found = true; | |
1193 | } | |
1194 | break; | |
1195 | } | |
1196 | ||
1197 | index = pvec.pages[i - 1]->index + 1; | |
1198 | pagevec_release(&pvec); | |
1199 | } while (index <= end); | |
1200 | ||
1201 | out: | |
1202 | pagevec_release(&pvec); | |
1203 | return found; | |
1204 | } | |
1205 | ||
3fe3e6b1 JL |
1206 | STATIC loff_t |
1207 | xfs_seek_data( | |
1208 | struct file *file, | |
834ab122 | 1209 | loff_t start) |
3fe3e6b1 JL |
1210 | { |
1211 | struct inode *inode = file->f_mapping->host; | |
1212 | struct xfs_inode *ip = XFS_I(inode); | |
1213 | struct xfs_mount *mp = ip->i_mount; | |
3fe3e6b1 JL |
1214 | loff_t uninitialized_var(offset); |
1215 | xfs_fsize_t isize; | |
1216 | xfs_fileoff_t fsbno; | |
1217 | xfs_filblks_t end; | |
1218 | uint lock; | |
1219 | int error; | |
1220 | ||
309ecac8 | 1221 | lock = xfs_ilock_data_map_shared(ip); |
3fe3e6b1 JL |
1222 | |
1223 | isize = i_size_read(inode); | |
1224 | if (start >= isize) { | |
1225 | error = ENXIO; | |
1226 | goto out_unlock; | |
1227 | } | |
1228 | ||
3fe3e6b1 JL |
1229 | /* |
1230 | * Try to read extents from the first block indicated | |
1231 | * by fsbno to the end block of the file. | |
1232 | */ | |
52f1acc8 | 1233 | fsbno = XFS_B_TO_FSBT(mp, start); |
3fe3e6b1 | 1234 | end = XFS_B_TO_FSB(mp, isize); |
52f1acc8 JL |
1235 | for (;;) { |
1236 | struct xfs_bmbt_irec map[2]; | |
1237 | int nmap = 2; | |
1238 | unsigned int i; | |
3fe3e6b1 | 1239 | |
52f1acc8 JL |
1240 | error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap, |
1241 | XFS_BMAPI_ENTIRE); | |
1242 | if (error) | |
1243 | goto out_unlock; | |
3fe3e6b1 | 1244 | |
52f1acc8 JL |
1245 | /* No extents at given offset, must be beyond EOF */ |
1246 | if (nmap == 0) { | |
1247 | error = ENXIO; | |
1248 | goto out_unlock; | |
1249 | } | |
1250 | ||
1251 | for (i = 0; i < nmap; i++) { | |
1252 | offset = max_t(loff_t, start, | |
1253 | XFS_FSB_TO_B(mp, map[i].br_startoff)); | |
1254 | ||
1255 | /* Landed in a data extent */ | |
1256 | if (map[i].br_startblock == DELAYSTARTBLOCK || | |
1257 | (map[i].br_state == XFS_EXT_NORM && | |
1258 | !isnullstartblock(map[i].br_startblock))) | |
1259 | goto out; | |
1260 | ||
1261 | /* | |
1262 | * Landed in an unwritten extent, try to search data | |
1263 | * from page cache. | |
1264 | */ | |
1265 | if (map[i].br_state == XFS_EXT_UNWRITTEN) { | |
1266 | if (xfs_find_get_desired_pgoff(inode, &map[i], | |
1267 | DATA_OFF, &offset)) | |
1268 | goto out; | |
1269 | } | |
1270 | } | |
1271 | ||
1272 | /* | |
1273 | * map[0] is a hole, or it is an unwritten extent with no | |
1274 | * data in the page cache. If map[1] is empty as well, we | |
1275 | * are probably reading past EOF. | |
1276 | */ | |
3fe3e6b1 JL |
1277 | if (nmap == 1) { |
1278 | error = ENXIO; | |
1279 | goto out_unlock; | |
1280 | } | |
1281 | ||
52f1acc8 JL |
1282 | ASSERT(i > 1); |
1283 | ||
1284 | /* | |
1285 | * Nothing was found; proceed to the next round of the search | |
1286 | * if the reading offset has not reached or passed EOF. | |
1287 | */ | |
1288 | fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; | |
1289 | start = XFS_FSB_TO_B(mp, fsbno); | |
1290 | if (start >= isize) { | |
1291 | error = ENXIO; | |
1292 | goto out_unlock; | |
1293 | } | |
3fe3e6b1 JL |
1294 | } |
1295 | ||
52f1acc8 | 1296 | out: |
46a1c2c7 | 1297 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); |
3fe3e6b1 JL |
1298 | |
1299 | out_unlock: | |
01f4f327 | 1300 | xfs_iunlock(ip, lock); |
3fe3e6b1 JL |
1301 | |
1302 | if (error) | |
1303 | return -error; | |
1304 | return offset; | |
1305 | } | |
1306 | ||
1307 | STATIC loff_t | |
1308 | xfs_seek_hole( | |
1309 | struct file *file, | |
834ab122 | 1310 | loff_t start) |
3fe3e6b1 JL |
1311 | { |
1312 | struct inode *inode = file->f_mapping->host; | |
1313 | struct xfs_inode *ip = XFS_I(inode); | |
1314 | struct xfs_mount *mp = ip->i_mount; | |
1315 | loff_t uninitialized_var(offset); | |
3fe3e6b1 JL |
1316 | xfs_fsize_t isize; |
1317 | xfs_fileoff_t fsbno; | |
b686d1f7 | 1318 | xfs_filblks_t end; |
3fe3e6b1 JL |
1319 | uint lock; |
1320 | int error; | |
1321 | ||
1322 | if (XFS_FORCED_SHUTDOWN(mp)) | |
1323 | return -XFS_ERROR(EIO); | |
1324 | ||
309ecac8 | 1325 | lock = xfs_ilock_data_map_shared(ip); |
3fe3e6b1 JL |
1326 | |
1327 | isize = i_size_read(inode); | |
1328 | if (start >= isize) { | |
1329 | error = ENXIO; | |
1330 | goto out_unlock; | |
1331 | } | |
1332 | ||
1333 | fsbno = XFS_B_TO_FSBT(mp, start); | |
b686d1f7 JL |
1334 | end = XFS_B_TO_FSB(mp, isize); |
1335 | ||
1336 | for (;;) { | |
1337 | struct xfs_bmbt_irec map[2]; | |
1338 | int nmap = 2; | |
1339 | unsigned int i; | |
1340 | ||
1341 | error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap, | |
1342 | XFS_BMAPI_ENTIRE); | |
1343 | if (error) | |
1344 | goto out_unlock; | |
1345 | ||
1346 | /* No extents at given offset, must be beyond EOF */ | |
1347 | if (nmap == 0) { | |
1348 | error = ENXIO; | |
1349 | goto out_unlock; | |
1350 | } | |
1351 | ||
1352 | for (i = 0; i < nmap; i++) { | |
1353 | offset = max_t(loff_t, start, | |
1354 | XFS_FSB_TO_B(mp, map[i].br_startoff)); | |
1355 | ||
1356 | /* Landed in a hole */ | |
1357 | if (map[i].br_startblock == HOLESTARTBLOCK) | |
1358 | goto out; | |
1359 | ||
1360 | /* | |
1361 | * Landed in an unwritten extent, try to search hole | |
1362 | * from page cache. | |
1363 | */ | |
1364 | if (map[i].br_state == XFS_EXT_UNWRITTEN) { | |
1365 | if (xfs_find_get_desired_pgoff(inode, &map[i], | |
1366 | HOLE_OFF, &offset)) | |
1367 | goto out; | |
1368 | } | |
1369 | } | |
3fe3e6b1 | 1370 | |
3fe3e6b1 | 1371 | /* |
b686d1f7 JL |
1372 | * map[0] contains data, or it is unwritten but has data in |
1373 | * the page cache; either way we are probably reading past | |
1374 | * EOF. We should fix the offset to point to the end of the | |
1375 | * file (i.e., there is an implicit hole at the end of any | |
1376 | * file). | |
3fe3e6b1 | 1377 | */ |
b686d1f7 JL |
1378 | if (nmap == 1) { |
1379 | offset = isize; | |
1380 | break; | |
1381 | } | |
1382 | ||
1383 | ASSERT(i > 1); | |
1384 | ||
1385 | /* | |
1386 | * Both mappings contain data; proceed to the next round of | |
1387 | * the search if the current reading offset has not passed EOF. | |
1388 | */ | |
1389 | fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; | |
1390 | start = XFS_FSB_TO_B(mp, fsbno); | |
1391 | if (start >= isize) { | |
1392 | offset = isize; | |
1393 | break; | |
1394 | } | |
3fe3e6b1 JL |
1395 | } |
1396 | ||
b686d1f7 JL |
1397 | out: |
1398 | /* | |
1399 | * At this point, we must have found a hole. However, the returned | |
1400 | * offset may be bigger than the file size, since it may be aligned | |
1401 | * to a page boundary for unwritten extents; we need to handle that | |
1402 | * case specially. | |
1403 | */ | |
1404 | offset = min_t(loff_t, offset, isize); | |
46a1c2c7 | 1405 | offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes); |
3fe3e6b1 JL |
1406 | |
1407 | out_unlock: | |
01f4f327 | 1408 | xfs_iunlock(ip, lock); |
3fe3e6b1 JL |
1409 | |
1410 | if (error) | |
1411 | return -error; | |
1412 | return offset; | |
1413 | } | |
1414 | ||
1415 | STATIC loff_t | |
1416 | xfs_file_llseek( | |
1417 | struct file *file, | |
1418 | loff_t offset, | |
1419 | int origin) | |
1420 | { | |
1421 | switch (origin) { | |
1422 | case SEEK_END: | |
1423 | case SEEK_CUR: | |
1424 | case SEEK_SET: | |
1425 | return generic_file_llseek(file, offset, origin); | |
1426 | case SEEK_DATA: | |
834ab122 | 1427 | return xfs_seek_data(file, offset); |
3fe3e6b1 | 1428 | case SEEK_HOLE: |
834ab122 | 1429 | return xfs_seek_hole(file, offset); |
3fe3e6b1 JL |
1430 | default: |
1431 | return -EINVAL; | |
1432 | } | |
1433 | } | |
1434 | ||
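The SEEK_DATA and SEEK_HOLE cases above back the corresponding lseek() whence values. A minimal userspace sketch (path assumed) that walks the data extents of a sparse file; ENXIO from SEEK_DATA means there is no more data past the given offset:

```c
#define _GNU_SOURCE		/* for SEEK_DATA / SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/xfs/sparse-file", O_RDONLY);
	off_t data, hole = 0;

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/* Alternate between the next data offset and the hole ending it. */
	while ((data = lseek(fd, hole, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
	}

	close(fd);
	return EXIT_SUCCESS;
}
```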
4b6f5d20 | 1435 | const struct file_operations xfs_file_operations = { |
3fe3e6b1 | 1436 | .llseek = xfs_file_llseek, |
1da177e4 | 1437 | .read = do_sync_read, |
bb3f724e | 1438 | .write = do_sync_write, |
3562fd45 NS |
1439 | .aio_read = xfs_file_aio_read, |
1440 | .aio_write = xfs_file_aio_write, | |
1b895840 NS |
1441 | .splice_read = xfs_file_splice_read, |
1442 | .splice_write = xfs_file_splice_write, | |
3562fd45 | 1443 | .unlocked_ioctl = xfs_file_ioctl, |
1da177e4 | 1444 | #ifdef CONFIG_COMPAT |
3562fd45 | 1445 | .compat_ioctl = xfs_file_compat_ioctl, |
1da177e4 | 1446 | #endif |
3562fd45 NS |
1447 | .mmap = xfs_file_mmap, |
1448 | .open = xfs_file_open, | |
1449 | .release = xfs_file_release, | |
1450 | .fsync = xfs_file_fsync, | |
2fe17c10 | 1451 | .fallocate = xfs_file_fallocate, |
1da177e4 LT |
1452 | }; |
1453 | ||
4b6f5d20 | 1454 | const struct file_operations xfs_dir_file_operations = { |
f999a5bf | 1455 | .open = xfs_dir_open, |
1da177e4 | 1456 | .read = generic_read_dir, |
b8227554 | 1457 | .iterate = xfs_file_readdir, |
59af1584 | 1458 | .llseek = generic_file_llseek, |
3562fd45 | 1459 | .unlocked_ioctl = xfs_file_ioctl, |
d3870398 | 1460 | #ifdef CONFIG_COMPAT |
3562fd45 | 1461 | .compat_ioctl = xfs_file_compat_ioctl, |
d3870398 | 1462 | #endif |
1da2f2db | 1463 | .fsync = xfs_dir_fsync, |
1da177e4 LT |
1464 | }; |
1465 | ||
f0f37e2f | 1466 | static const struct vm_operations_struct xfs_file_vm_ops = { |
54cb8821 | 1467 | .fault = filemap_fault, |
4f57dbc6 | 1468 | .page_mkwrite = xfs_vm_page_mkwrite, |
0b173bc4 | 1469 | .remap_pages = generic_file_remap_pages, |
6fac0cb4 | 1470 | }; |