/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

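/*
 * Editorial note, illustrative only: for XFS_IOLOCK_EXCL an exclusive
 * critical section bracketed by these wrappers expands to
 *
 *	mutex_lock(&VFS_I(ip)->i_mutex);
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 *	mutex_unlock(&VFS_I(ip)->i_mutex);
 *
 * which is the lock ordering every IO path below must respect.
 */
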
/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of the buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}

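/*
 * Worked example of the loop above (editorial, assuming 4096 byte
 * pages): for pos = 4094 and count = 10, the first pass computes
 * offset = 4094 and bytes = 2, zeroing the tail of the first page;
 * the second pass computes offset = 0 and bytes = 8, zeroing the head
 * of the next page.
 */
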
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

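/*
 * Summary of the flush behaviour above, for orientation (this table is
 * editorial, not part of the original source):
 *
 *	separate log device	- pre-flush the data device before the
 *				  log force
 *	realtime inode		- pre-flush the rt device before the
 *				  log force
 *	single device		- rely on the log force to flush; if it
 *				  turned out to be a no-op, flush the
 *				  data device cache explicitly
 */
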
STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
	if (ret < 0)
		return ret;

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((pos & target->bt_smask) || (size & target->bt_smask)) {
			if (pos == i_size_read(inode))
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, -1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
			truncate_pagecache_range(VFS_I(ip), pos, -1);
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

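/*
 * The lock dance in the direct IO read path above, step by step
 * (editorial summary): take the IOLOCK shared; if cached pages exist,
 * trade it for the IOLOCK exclusive, recheck nrpages (the pages may
 * have been reclaimed while no lock was held), write back and
 * invalidate the range, then demote back to shared for the actual
 * read.  The recheck after the upgrade matters because another thread
 * can race with us across the unlocked window.
 */
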
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_write_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize)		/* current inode size */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

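/*
 * Worked example of the block range calculation above (editorial,
 * assuming 4096 byte filesystem blocks): for isize = 10000 and
 * offset = 20000, last_fsb = XFS_B_TO_FSBT(9999) = 2, start_zero_fsb =
 * XFS_B_TO_FSB(10000) = 3 (rounded up) and end_zero_fsb =
 * XFS_B_TO_FSBT(19999) = 4, so xfs_zero_last_block() handles the tail
 * of block 2 and the loop zeroes any mapped, written space in blocks
 * 3-4.
 */
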
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before, as the file may have changed
	 * while the lock was dropped for the upgrade.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	int			iolock;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed.  We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock, to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	if (mapping->nrpages) {
		ret = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						    pos, -1);
		if (ret)
			goto out;
		truncate_pagecache_range(VFS_I(ip), pos, -1);
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

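/*
 * Alignment example for the checks above (editorial, assuming 512 byte
 * sectors, bt_smask = 511, and 4096 byte blocks, m_blockmask = 4095):
 * a write at pos = 4096 for count = 512 passes the sector alignment
 * check but sets unaligned_io because pos + count = 4608 is not block
 * aligned, so it is serialised via inode_dio_wait(); a write at
 * pos = 4096 for count = 8192 is fully aligned and can proceed under
 * IOLOCK_SHARED.
 */
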
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	size_t			count = ocount;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, 0);

	/*
	 * If we just got an ENOSPC, try to write back all dirty inodes to
	 * convert delalloc space to free up some of the excess reserved
	 * metadata space.
	 */
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		ret = -EIO;
		goto out;
	}

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						  ocount);

	if (ret > 0) {
		ssize_t err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, pos, ret);
		if (err < 0)
			ret = err;
	}

out:
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	long			error;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = -inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		error = xfs_alloc_file_space(ip, offset, len,
					     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_unlock;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	ip->i_d.di_mode &= ~S_ISUID;
	if (ip->i_d.di_mode & S_IXGRP)
		ip->i_d.di_mode &= ~S_ISGID;

	if (!(mode & FALLOC_FL_PUNCH_HOLE))
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (file->f_flags & O_DSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return -error;
}

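/*
 * Userspace view of the handler above (illustrative; the fd and sizes
 * are hypothetical):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			preallocate 1MiB,
 *							extending i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);	preallocate without
 *							changing i_size
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 0, 4096);	punch a hole in the
 *							first 4KiB
 */
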
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(NULL, ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, ctx, bufsize);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * mmap()d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable
 * page, which means we can do correct delalloc accounting (ENOSPC
 * checking!) and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for either xfs_seek_data() or xfs_seek_hole().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have the BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

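/*
 * Buffer state to classification mapping used above, for reference
 * (editorial summary):
 *
 *	BH_Unwritten or BH_Uptodate set	-> treated as data (DATA_OFF hit)
 *	neither flag set		-> treated as a hole (HOLE_OFF hit)
 */
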
/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_data() or xfs_seek_hole().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * holes and this is the first time we got into the loop, it
		 * means that the given offset landed in a hole, return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, the last offset
		 * has been updated to point at the end of the last mapped
		 * page; if it does not reach the endpoint of the search,
		 * there must be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping.  However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset has not reached the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the
				 * start point of the search if this is the
				 * first time we come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching data but found nothing, or
			 * searching a hole but found a data buffer.  In either
			 * case, the next page probably contains what we want,
			 * so update the last offset to point at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * If the number of returned pages is less than we wanted, the
		 * search is done.  In this case, nothing was found for
		 * searching data, but we found a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

STATIC loff_t
xfs_seek_data(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	lock = xfs_ilock_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);
	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a data extent */
			if (map[i].br_startblock == DELAYSTARTBLOCK ||
			    (map[i].br_state == XFS_EXT_NORM &&
			     !isnullstartblock(map[i].br_startblock)))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search data
			 * from the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							DATA_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] is a hole, or an unwritten extent without data in
		 * the page cache.  If there is nothing in map[1], that
		 * probably means we are reading after EOF.
		 */
		if (nmap == 1) {
			error = ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the reading offset is not beyond or at EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			error = ENXIO;
			goto out_unlock;
		}
	}

out:
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock_map_shared(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_seek_hole(
	struct file		*file,
	loff_t			start)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	lock = xfs_ilock_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = ENXIO;
		goto out_unlock;
	}

	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in a hole */
			if (map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/*
			 * Landed in an unwritten extent, try to search a hole
			 * from the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
							HOLE_OFF, &offset))
					goto out;
			}
		}

		/*
		 * map[0] contains data, or it's unwritten but has data in
		 * the page cache, which probably means that we are reading
		 * after EOF.  We should fix the offset to point to the end
		 * of the file (i.e., there is an implicit hole at the end
		 * of any file).
		 */
		if (nmap == 1) {
			offset = isize;
			break;
		}

		ASSERT(i > 1);

		/*
		 * Both mappings contain data, proceed to the next round of
		 * search if the current reading offset is not beyond or at
		 * EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			offset = isize;
			break;
		}
	}

out:
	/*
	 * At this point, we must have found a hole.  However, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents; we need to deal with this
	 * situation in particular.
	 */
	offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock_map_shared(ip, lock);

	if (error)
		return -error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		origin)
{
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, origin);
	case SEEK_DATA:
		return xfs_seek_data(file, offset);
	case SEEK_HOLE:
		return xfs_seek_hole(file, offset);
	default:
		return -EINVAL;
	}
}

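/*
 * Userspace view of the llseek handler above (illustrative; the fd is
 * hypothetical): walking the data regions of a sparse file.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	  first data offset at or
 *						  after 0; fails with ENXIO
 *						  if there is none before EOF
 *	off_t hole = lseek(fd, data, SEEK_HOLE);  end of that data region;
 *						  every file has an implicit
 *						  hole at EOF
 */
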
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};